Dataset columns:

| column | type | range |
|---|---|---|
| code | string | 82–54.1k chars |
| code_codestyle | int64 | 0–699 |
| style_context | string | 111–35.6k chars |
| style_context_codestyle | int64 | 0–699 |
| label | int64 | 0–1 |
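Each row below renders a `code` snippet, then its `code_codestyle` id, then a `style_context` snippet with its style id and the row `label`. A minimal access sketch; the Hub id is a placeholder (the real dataset name is not given in this dump), and the meaning of `label` is inferred from the column layout rather than documented:

```python
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # placeholder id, not the real one
row = ds[0]
print(row["code"][:120])      # a Python snippet
print(row["code_codestyle"])  # integer style id in [0, 699]
print(row["label"])           # 0 or 1 (presumably whether the two styles match)
```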
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViT MSN model."""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| code_codestyle: 14 |
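As a quick usage sketch, the config above is exposed by `transformers` as `ViTMSNConfig`; the defaults mirror the values in the signature.

```python
from transformers import ViTMSNConfig, ViTMSNModel

config = ViTMSNConfig()         # hidden_size=768, num_hidden_layers=12, ...
model = ViTMSNModel(config)     # randomly initialised, no pretrained weights
print(model.config.patch_size)  # 16
```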
import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load the configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            new_state_dict["luke." + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| style_context_codestyle: 14 | label: 1 |
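A sketch of driving the converter above from Python rather than the CLI; all paths here are placeholders, not real checkpoint locations.

```python
convert_luke_checkpoint(
    "mluke/pytorch_model.bin",   # placeholder checkpoint path
    "mluke/metadata.json",       # placeholder metadata path
    "mluke/entity_vocab.jsonl",  # placeholder entity vocab path
    "./mluke-base-converted",    # output folder
    "base",
)
```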
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    two_sqrt_a_alpha = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + two_sqrt_a_alpha)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - two_sqrt_a_alpha)
    a0 = ppmc + two_sqrt_a_alpha
    a1 = -2 * pmpc
    a2 = ppmc - two_sqrt_a_alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    two_sqrt_a_alpha = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + two_sqrt_a_alpha)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - two_sqrt_a_alpha)
    a0 = pmc + two_sqrt_a_alpha
    a1 = 2 * mpc
    a2 = pmc - two_sqrt_a_alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| code_codestyle: 14 |
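A minimal sketch of using one of the factories above, assuming `IIRFilter.process` consumes and returns one float sample, as in the same repository's `iir_filter` module.

```python
filt = make_lowpass(frequency=1_000, samplerate=48_000)
impulse = [1.0] + [0.0] * 15
response = [filt.process(sample) for sample in impulse]  # impulse response of the filter
print(response[:4])
```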
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    """Spearman rank-order correlation coefficient metric."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| style_context_codestyle: 14 | label: 1 |
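The metric is a thin wrapper over SciPy, so the same numbers can be obtained directly:

```python
from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2))  # -0.7, matching Example 1 above
```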
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Returns the index of the parent node in the heap."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Returns the index of the left child node in the heap."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Returns the index of the right child node in the heap."""
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue backed by a binary heap with a position map."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Runs Prim's algorithm and returns the distance and parent maps."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
| code_codestyle: 14 |
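A small usage sketch of the classes above; the exact `dist`/`parent` contents depend on extraction order, so the printout is only indicative.

```python
graph = GraphUndirectedWeighted[int]()
graph.add_edge(1, 2, 1)
graph.add_edge(2, 3, 2)
graph.add_edge(3, 4, 1)
graph.add_edge(1, 4, 4)
dist, parent = prims_algo(graph)
print(parent)  # e.g. {1: None, 2: 1, 3: 2, 4: 3}
```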
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to decode raw audio bytes with ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to stream raw microphone audio through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Streams microphone audio in chunks, with optional striding between chunks.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Re-chunks a byte iterator into `chunk_len`-sized pieces with the given stride.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal: reads from an ffmpeg subprocess in `buflen`-sized chunks.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| style_context_codestyle: 14 | label: 1 |
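A sketch of the typical entry point, `ffmpeg_read`; it requires the `ffmpeg` binary on PATH, and the file path is a placeholder.

```python
with open("sample.wav", "rb") as f:  # placeholder path
    audio = ffmpeg_read(f.read(), sampling_rate=16_000)
print(audio.shape, audio.dtype)      # (n_samples,), float32
```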
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| code_codestyle: 14 |
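A hypothetical programmatic invocation of the converter above; the local paths are placeholders, and the model directory must contain a `vae` subfolder.

```python
convert_models("./stable-diffusion-v1-5", "./sd-onnx", opset=14, fp16=False)
```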
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = KandinskyInpaintPipeline
UpperCAmelCase__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase__ : Optional[int] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Any = False
@property
def __lowercase ( self ) -> Optional[int]:
return 3_2
@property
def __lowercase ( self ) -> int:
return 3_2
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def __lowercase ( self ) -> Optional[Any]:
return 1_0_0
@property
def __lowercase ( self ) -> Optional[Any]:
_a : Any = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a : Optional[int] = MultilingualCLIP(_a )
_a : Tuple = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[str] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_a : Dict = UNet2DConditionModel(**_a )
return model
@property
def __lowercase ( self ) -> Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ) -> Any:
_a : List[Any] = self.dummy_text_encoder
_a : Optional[Any] = self.dummy_tokenizer
_a : Optional[Any] = self.dummy_unet
_a : Union[str, Any] = self.dummy_movq
_a : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
_a : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowercase ( self , _a , _a=0 ) -> int:
_a : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_a : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_a : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_a ) ).to(_a )
_a : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a : Optional[int] = Image.fromarray(np.uint8(_a ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
_a : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.float32 )
_a : List[str] = 0
if str(_a ).startswith('''mps''' ):
_a : Tuple = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
_a : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[Any] = '''cpu'''
_a : List[Any] = self.get_dummy_components()
_a : Tuple = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Any = pipe(**self.get_dummy_inputs(_a ) )
_a : str = output.images
_a : Tuple = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
_a : str = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowercase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
_a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_a : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.float32 )
_a : Any = 0
_a : Optional[Any] = '''a hat'''
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(_a )
_a : Tuple = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.float16 )
_a : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_a , _a : Dict = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_a : Optional[int] = pipeline(
_a , image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
_a : Optional[int] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a , _a )
| style_context_codestyle: 14 | label: 1 |
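For reference, the seeding pattern the test relies on can be reproduced in isolation; the shape below mirrors the tester's `cross_attention_dim` of 32 (an assumption carried over from the class above).

```python
import random

from diffusers.utils import floats_tensor  # same helper the test file imports

image_embeds = floats_tensor((1, 32), rng=random.Random(0))
print(image_embeds.shape)  # torch.Size([1, 32])
```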
import math
import sys


def read_file_binary(file_path: str) -> str:
    """
    Reads the given file as bytes and returns them as one long string of bits.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """
    Decompresses the given bit string with the Lempel-Ziv-Welch algorithm and
    returns the result as a bit string.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # When the dictionary size hits a power of two, every key gains a leading bit
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes the given bit string to a file as bytes, padding the final byte.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """
    Removes the size prefix that the matching compressor prepends to the data.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, decompresses it and writes the result to the destination file.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| code_codestyle: 14 |
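The bit-string representation that `read_file_binary` builds is just each byte formatted as 8 binary digits:

```python
assert f"{65:08b}" == "01000001"  # ASCII 'A' -> one 8-bit chunk of the bit string
```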
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| style_context_codestyle: 14 | label: 1 |
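A sketch of calling the underlying converter directly; the paths are placeholders, and the remaining keyword defaults are assumed to match the argparse defaults above.

```python
controlnet = download_controlnet_from_original_ckpt(
    checkpoint_path="control_sd15_canny.pth",  # placeholder
    original_config_file="cldm_v15.yaml",      # placeholder
    image_size=512,
    extract_ema=False,
)
controlnet.save_pretrained("./controlnet-canny")
```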
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Converts a Roman numeral to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Converts an integer to a Roman numeral."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| code_codestyle: 14 |
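Both directions round-trip, e.g.:

```python
assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"
```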
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Sort by the key function, then take items while they fit in the budget
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| style_context_codestyle: 14 | label: 1 |
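A worked example of the greedy selection above, sorting by raw value under a weight budget of 100:

```python
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 60, 40, 70]

foods = build_menu(food, value, weight)
chosen, total_value = greedy(foods, 100, Things.get_value)
print(chosen)       # [Things(Pizza, 100, 60), Things(Burger, 80, 40)]
print(total_value)  # 180.0
```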
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Checks whether a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Generates the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
| code_codestyle: 14 |
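Sanity check on a small bound:

```python
print(solution(10))  # 2 + 3 + 5 + 7 = 17
```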
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3 , _a=True , _a=True , _a=0.1 , _a=0.1 , _a=2_2_4 , _a=1_0_0_0 , _a=[3, 3, 6, 4] , _a=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Tuple:
_a : Dict = parent
_a : Optional[int] = batch_size
_a : Optional[Any] = num_channels
_a : Union[str, Any] = is_training
_a : Tuple = use_labels
_a : Dict = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Dict = num_labels
_a : List[str] = image_size
_a : Dict = layer_depths
_a : str = embed_dims
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
if self.use_labels:
_a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : Dict = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> int:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_a , layer_scale_init_value=1e-5 , )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : List[Any] = SwiftFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __lowercase ( self , _a , _a , _a ) -> Optional[Any]:
_a : List[str] = self.num_labels
_a : Optional[int] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_a : Union[str, Any] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Tuple:
((_a) , (_a) , (_a)) : Optional[int] = self.prepare_config_and_inputs()
_a : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[int] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = SwiftFormerModelTester(self )
_a : int = ConfigTester(
self , config_class=_a , has_text_modality=_a , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def __lowercase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
def __lowercase ( self ) -> Dict:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __lowercase ( self ) -> str:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> int:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = SwiftFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> int:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[Any] = outputs.hidden_states
_a : Union[str, Any] = 8
self.assertEqual(len(_a ) , _a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> str:
def _config_zero_init(_a ):
_a : List[Any] = copy.deepcopy(_a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_a , _a , 1e-1_0 )
if isinstance(getattr(_a , _a , _a ) , _a ):
_a : int = _config_zero_init(getattr(_a , _a ) )
setattr(_a , _a , _a )
return configs_no_init
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Dict:
_a : Any = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_a )
_a : Any = self.default_image_processor
_a : Any = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[Any] = model(**_a )
# verify the logits
_a : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : int = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| style_context_codestyle: 14 | label: 1 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Non-default scaler kwargs should be forwarded to the GradScaler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
a__ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
a__ = Accelerator(kwargs_handlers=[ddp_scaler])
a__ = torch.nn.Linear(100, 200)
a__ = accelerator.prepare(model)
# Check the values changed in kwargs
a__ = ''''''
a__ = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 14 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : str ) -> List[Any]:
"""simple docstring"""
_a : Tuple = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
_a : Dict = MaskFormerConfig(backbone_config=__a )
_a : Optional[Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
_a : Optional[Any] = 847
_a : List[Any] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
_a : Union[str, Any] = 150
_a : Any = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
_a : int = 171
_a : List[str] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
_a : Dict = 133
_a : Optional[Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
_a : List[Any] = 19
_a : Optional[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
_a : List[Any] = 65
_a : Dict = '''mapillary-vistas-id2label.json'''
_a : Optional[int] = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
    _a : Tuple = {int(k ): v for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : str = dct.pop(__a )
_a : str = val
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_a : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_a : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[int] = in_proj_weight[:dim, :]
_a : List[Any] = in_proj_bias[: dim]
_a : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
_a : Tuple = in_proj_bias[
dim : dim * 2
]
_a : int = in_proj_weight[
-dim :, :
]
_a : Optional[int] = in_proj_bias[-dim :]
# fmt: on
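# Illustrative sketch (not part of the original script): the fused qkv projection popped
# above has shape (3 * dim, dim) and splits row-wise into three equal (dim, dim) blocks:
#
#     fused = torch.randn(3 * 4, 4)                # hypothetical example with dim = 4
#     q, k, v = fused[:4], fused[4:8], fused[8:]   # query, key and value weights
#
# which mirrors the [:dim], [dim : dim * 2] and [-dim:] slices used above.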
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ) -> List[Any]:
"""simple docstring"""
    # fmt: off
    _a : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Union[str, Any] = in_proj_weight[: hidden_size, :]
        _a : List[Any] = in_proj_bias[: hidden_size]
_a : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Any = in_proj_bias[hidden_size : hidden_size * 2]
_a : Tuple = in_proj_weight[-hidden_size :, :]
_a : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_a : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[Any] = in_proj_weight[: hidden_size, :]
        _a : Any = in_proj_bias[: hidden_size]
_a : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_a : List[str] = in_proj_weight[-hidden_size :, :]
_a : int = in_proj_bias[-hidden_size :]
# fmt: on
def __UpperCAmelCase ( ) -> torch.Tensor:
"""simple docstring"""
_a : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : Dict = Image.open(requests.get(__a ,stream=__a ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __a : str ,__a : str ,__a : str ,__a : bool = False ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[Any] = get_maskformer_config(__a )
# load original state_dict
with open(__a ,'''rb''' ) as f:
_a : str = pickle.load(__a )
_a : Union[str, Any] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_a : Any = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a ,__a ,__a )
read_in_swin_q_k_v(__a ,config.backbone_config )
read_in_decoder_q_k_v(__a ,__a )
# update to torch tensors
for key, value in state_dict.items():
_a : Optional[int] = torch.from_numpy(__a )
# load 🤗 model
_a : Dict = MaskFormerForInstanceSegmentation(__a )
model.eval()
for name, param in model.named_parameters():
print(__a ,param.shape )
_a , _a : Tuple = model.load_state_dict(__a ,strict=__a )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__a ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_a : Union[str, Any] = prepare_img()
if "vistas" in model_name:
_a : int = 65
elif "cityscapes" in model_name:
_a : Tuple = 65_535
else:
_a : str = 255
_a : Dict = True if '''ade''' in model_name else False
_a : Optional[Any] = MaskFormerImageProcessor(ignore_index=__a ,reduce_labels=__a )
_a : Optional[Any] = image_processor(__a ,return_tensors='''pt''' )
_a : int = model(**__a )
print('''Logits:''' ,outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_a : Union[str, Any] = torch.tensor(
[[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__a ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 14 | 1 |
import argparse
from collections import defaultdict
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Tuple ,__a : Tuple ,__a : Dict ,__a : Tuple ) -> List[Any]:
"""simple docstring"""
_a : List[str] = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__a ,'''r''' ) as f:
_a : Dict = f.readlines()
_a : str = F"""class {class_name}("""
_a : Tuple = F"""{4 * ' '}def {test_name}("""
_a : List[Any] = F"""{8 * ' '}{correct_line.split()[0]}"""
_a : Tuple = F"""{16 * ' '}{correct_line.split()[0]}"""
_a : Tuple = False
_a : str = False
_a : Any = False
_a : Dict = False
_a : Tuple = 0
_a : List[str] = 0
_a : List[Any] = []
for line in lines:
if line.startswith(__a ):
_a : Tuple = True
elif in_class and line.startswith(__a ):
_a : List[str] = True
elif in_class and in_func and (line.startswith(__a ) or line.startswith(__a )):
_a : Tuple = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_a : Dict = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_a : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * ' '}{correct_line}""" )
_a : Optional[Any] = False
else:
new_lines.append(__a )
with open(__a ,'''w''' ) as f:
for line in new_lines:
f.write(__a )
def __UpperCAmelCase ( __a : Dict ,__a : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
if fail is not None:
with open(__a ,'''r''' ) as f:
_a : Optional[int] = {l.strip() for l in f.readlines()}
else:
_a : List[Any] = None
with open(__a ,'''r''' ) as f:
_a : List[Any] = f.readlines()
_a : List[Any] = defaultdict(__a )
for line in correct_lines:
_a , _a , _a , _a : Dict = line.split(''';''' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__a ,__a ,__a ,__a ,__a )
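# Note (illustrative): each line of --correct_filename is therefore expected to look like
#   "tests/test_foo.py;FooModelTest;test_bar;self.assertEqual(x , y )"
# i.e. file, class name, test name and the corrected line, separated by semicolons
# (the file and test names here are hypothetical).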
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
a__ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 14 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = XLMProphetNetTokenizer
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[Any] = True
def __lowercase ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] = XLMProphetNetTokenizer(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Tuple = '''[PAD]'''
_a : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __lowercase ( self ) -> str:
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_a ) , 1_0_1_2 )
def __lowercase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def __lowercase ( self ) -> str:
_a : Tuple = XLMProphetNetTokenizer(_a , keep_accents=_a )
_a : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_a : List[Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
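        # Note (illustrative): the -9 entries above mark pieces ("9" and "é") that the small
        # SentencePiece fixture cannot encode, and the same positions decode back to "[UNK]"
        # in the check below.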
_a : List[str] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def __lowercase ( self ) -> List[str]:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def __lowercase ( self ) -> Tuple:
_a : str = '''Hello World!'''
_a : Tuple = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __lowercase ( self ) -> str:
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 14 | 1 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> Dict:
_a : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=6_4 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=3_2 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=1_0 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ) -> int:
_a : List[Any] = parent
_a : Union[str, Any] = batch_size
_a : str = image_size
_a : Optional[int] = patch_size
_a : str = num_channels
_a : Tuple = make_divisible(5_1_2 * width_multiplier , divisor=8 )
_a : Optional[int] = hidden_act
_a : List[Any] = conv_kernel_size
_a : Union[str, Any] = output_stride
_a : int = classifier_dropout_prob
_a : str = use_labels
_a : List[Any] = is_training
_a : Optional[Any] = num_labels
_a : List[Any] = initializer_range
_a : List[Any] = scope
_a : Optional[Any] = width_multiplier
_a : Union[str, Any] = ffn_dropout
_a : List[str] = attn_dropout
def __lowercase ( self ) -> Optional[int]:
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
_a : Dict = None
if self.use_labels:
_a : Any = ids_tensor([self.batch_size] , self.num_labels )
_a : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_a : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowercase ( self ) -> Dict:
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __lowercase ( self , _a , _a , _a , _a ) -> Optional[int]:
_a : Dict = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
_a : Tuple = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __lowercase ( self , _a , _a , _a , _a ) -> int:
_a : Any = self.num_labels
_a : Optional[int] = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
_a : Optional[Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self , _a , _a , _a , _a ) -> Optional[int]:
_a : Dict = self.num_labels
_a : int = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
_a : int = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_a : Union[str, Any] = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = self.prepare_config_and_inputs()
_a , _a , _a , _a : List[Any] = config_and_inputs
_a : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Any = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Tuple:
_a : List[Any] = MobileViTVaModelTester(self )
_a : str = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __lowercase ( self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __lowercase ( self ) -> str:
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __lowercase ( self ) -> Any:
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __lowercase ( self ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __lowercase ( self ) -> List[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> int:
pass
def __lowercase ( self ) -> List[str]:
_a , _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Tuple = model_class(_a )
_a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : int = [*signature.parameters.keys()]
_a : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> str:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[Any]:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Any = model(**self._prepare_for_class(_a , _a ) )
_a : Union[str, Any] = outputs.hidden_states
_a : Optional[int] = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_a : Union[str, Any] = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
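            # Illustrative trace (derived from the tester defaults): with image_size=64 the
            # five feature maps measure 32, 16, 8, 4 and 2 pixels, so divisor ends at 64
            # and 64 // 2 matches the configured output_stride of 32.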
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Any = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> List[str]:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __lowercase ( self ) -> int:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Tuple = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
_a : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> List[str]:
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
_a : List[str] = self.default_image_processor
_a : Optional[int] = prepare_img()
_a : List[str] = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : List[str] = model(**_a )
# verify the logits
_a : int = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : int = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@slow
def __lowercase ( self ) -> Any:
_a : Any = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_a : Optional[Any] = model.to(_a )
_a : Tuple = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_a : Tuple = prepare_img()
_a : List[Any] = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : str = model(**_a )
_a : Optional[int] = outputs.logits
# verify the logits
_a : Union[str, Any] = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , _a )
_a : Any = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1e-4 ) )
@slow
def __lowercase ( self ) -> int:
_a : Any = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_a : Union[str, Any] = model.to(_a )
_a : int = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_a : int = prepare_img()
_a : Union[str, Any] = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Any = model(**_a )
_a : Dict = outputs.logits.detach().cpu()
_a : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(5_0, 6_0)] )
_a : str = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , _a )
_a : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=_a )
_a : int = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , _a )
| 14 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = LxmertTokenizer
UpperCAmelCase__ : Optional[Any] = LxmertTokenizerFast
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Dict = True
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
_a : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowercase ( self , _a ) -> List[str]:
_a : Tuple = '''UNwant\u00E9d,running'''
_a : str = '''unwanted, running'''
return input_text, output_text
def __lowercase ( self ) -> List[Any]:
_a : str = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 1_0, 8, 9] )
def __lowercase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
_a : Optional[Any] = self.get_tokenizer()
_a : str = self.get_rust_tokenizer()
_a : Optional[Any] = '''I was born in 92000, and this is falsé.'''
_a : Optional[Any] = tokenizer.tokenize(_a )
_a : List[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
_a : Any = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Dict = self.get_rust_tokenizer()
_a : Optional[int] = tokenizer.encode(_a )
_a : Dict = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
| 14 | 1 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def __UpperCAmelCase ( __a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[Any] = VideoMAEConfig()
set_architecture_configs(__a ,__a )
if "finetuned" not in model_name:
_a : Optional[Any] = False
if "finetuned" in model_name:
_a : str = '''huggingface/label-files'''
if "kinetics" in model_name:
_a : Optional[int] = 400
_a : Union[str, Any] = '''kinetics400-id2label.json'''
elif "ssv2" in model_name:
_a : List[Any] = 174
_a : List[Any] = '''something-something-v2-id2label.json'''
else:
raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' )
_a : str = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
    _a : str = {int(k ): v for k, v in idalabel.items()}
_a : str = idalabel
_a : Tuple = {v: k for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __a : List[Any] ,__a : str ) -> List[str]:
"""simple docstring"""
if "small" in model_name:
_a : Any = 384
_a : int = 1_536
_a : Optional[int] = 12
_a : Optional[int] = 16
_a : Dict = 12
_a : Any = 3
_a : Any = 192
_a : int = 768
elif "large" in model_name:
_a : List[str] = 1_024
_a : Optional[int] = 4_096
_a : Tuple = 24
_a : Optional[Any] = 16
_a : List[Any] = 12
_a : str = 8
_a : Tuple = 512
_a : List[str] = 2_048
elif "huge" in model_name:
_a : Optional[int] = 1_280
_a : Optional[Any] = 5_120
_a : Union[str, Any] = 32
_a : str = 16
_a : Optional[Any] = 12
_a : List[str] = 8
_a : Union[str, Any] = 640
_a : Optional[Any] = 2_560
elif "base" not in model_name:
raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' )
def __UpperCAmelCase ( __a : Union[str, Any] ) -> Dict:
"""simple docstring"""
if "encoder." in name:
_a : Tuple = name.replace('''encoder.''' ,'''''' )
if "cls_token" in name:
_a : Dict = name.replace('''cls_token''' ,'''videomae.embeddings.cls_token''' )
if "decoder_pos_embed" in name:
_a : str = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
_a : Optional[int] = name.replace('''pos_embed''' ,'''videomae.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
_a : Union[str, Any] = name.replace('''patch_embed.proj''' ,'''videomae.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_a : Optional[Any] = name.replace('''patch_embed.norm''' ,'''videomae.embeddings.norm''' )
if "decoder.blocks" in name:
_a : List[str] = name.replace('''decoder.blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
_a : Union[str, Any] = name.replace('''blocks''' ,'''videomae.encoder.layer''' )
if "attn.proj" in name:
_a : str = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name and "bias" not in name:
_a : Optional[int] = name.replace('''attn''' ,'''attention.self''' )
if "attn" in name:
_a : List[str] = name.replace('''attn''' ,'''attention.attention''' )
if "norm1" in name:
_a : int = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
_a : Union[str, Any] = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
_a : List[str] = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
_a : int = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
_a : Union[str, Any] = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
_a : List[str] = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
_a : List[str] = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
_a : Dict = name.replace('''norm.weight''' ,'''videomae.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
_a : Dict = name.replace('''norm.bias''' ,'''videomae.layernorm.bias''' )
if "head" in name and "decoder" not in name:
_a : Optional[int] = name.replace('''head''' ,'''classifier''' )
return name
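# Illustrative example (not from the original script): chaining the substitutions above,
#   "encoder.blocks.0.attn.proj.weight" -> "videomae.encoder.layer.0.attention.output.dense.weight"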
def __UpperCAmelCase ( __a : Dict ,__a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_a : List[str] = orig_state_dict.pop(__a )
if key.startswith('''encoder.''' ):
_a : int = key.replace('''encoder.''' ,'''''' )
if "qkv" in key:
_a : List[str] = key.split('''.''' )
if key.startswith('''decoder.blocks''' ):
_a : Dict = config.decoder_hidden_size
_a : List[str] = int(key_split[2] )
_a : Any = '''decoder.decoder_layers.'''
if "weight" in key:
_a : Tuple = val[:dim, :]
_a : Union[str, Any] = val[dim : dim * 2, :]
_a : List[Any] = val[-dim:, :]
else:
_a : Union[str, Any] = config.hidden_size
_a : Tuple = int(key_split[1] )
_a : str = '''videomae.encoder.layer.'''
if "weight" in key:
_a : Optional[int] = val[:dim, :]
_a : int = val[dim : dim * 2, :]
_a : Union[str, Any] = val[-dim:, :]
else:
_a : int = val
return orig_state_dict
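# Illustrative example (not from the original script): for an encoder key such as
# "blocks.0.attn.qkv.weight", key_split[1] == "0", the prefix above resolves to
# "videomae.encoder.layer.", and the fused (3 * dim, dim) tensor splits row-wise into
# query, key and value blocks of shape (dim, dim) each.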
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_a : List[Any] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' ,filename='''eating_spaghetti.npy''' ,repo_type='''dataset''' )
_a : Dict = np.load(__a )
return list(__a )
def __UpperCAmelCase ( __a : Optional[int] ,__a : Any ,__a : Tuple ,__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_a : Optional[Any] = get_videomae_config(__a )
if "finetuned" in model_name:
_a : int = VideoMAEForVideoClassification(__a )
else:
_a : Tuple = VideoMAEForPreTraining(__a )
# download original checkpoint, hosted on Google Drive
_a : Tuple = '''pytorch_model.bin'''
gdown.cached_download(__a ,__a ,quiet=__a )
_a : Dict = torch.load(__a ,map_location='''cpu''' )
if "model" in files:
_a : Tuple = files['''model''']
else:
_a : str = files['''module''']
_a : int = convert_state_dict(__a ,__a )
model.load_state_dict(__a )
model.eval()
# verify model on basic input
_a : Tuple = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
_a : Union[str, Any] = prepare_video()
_a : Any = image_processor(__a ,return_tensors='''pt''' )
if "finetuned" not in model_name:
_a : Optional[Any] = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' ,filename='''bool_masked_pos.pt''' )
_a : Dict = torch.load(__a )
_a : Optional[int] = model(**__a )
_a : str = outputs.logits
_a : Tuple = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_a : Optional[Any] = torch.Size([1, 400] )
_a : str = torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
_a : Any = torch.Size([1, 174] )
_a : int = torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
_a : List[Any] = torch.Size([1, 1_408, 1_536] )
_a : Dict = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
_a : Union[str, Any] = torch.Size([1, 1_408, 1_536] )
_a : List[str] = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
_a : Union[str, Any] = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
_a : str = torch.Size([1, 1_408, 1_536] )
_a : str = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
_a : int = torch.Size([1, 400] )
_a : Dict = torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
_a : Optional[int] = torch.Size([1, 400] )
_a : List[Any] = torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_a : int = torch.Size([1, 400] )
_a : str = torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
_a : Tuple = torch.Size([1, 400] )
_a : Union[str, Any] = torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
_a : Optional[Any] = torch.Size([1, 1_408, 1_536] )
_a : str = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_a : List[str] = torch.Size([1, 174] )
_a : Optional[Any] = torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
_a : List[Any] = torch.Size([1, 1_408, 1_536] )
_a : Tuple = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
_a : Union[str, Any] = torch.Size([1, 174] )
_a : Union[str, Any] = torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] ,__a ,atol=1E-4 )
else:
print('''Logits:''' ,logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] ,__a ,atol=1E-4 )
print('''Logits ok!''' )
# verify loss, if applicable
if model_name == "videomae-base-short":
_a : Optional[int] = outputs.loss
assert torch.allclose(__a ,__a ,atol=1E-4 )
print('''Loss ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__a )
model.save_pretrained(__a )
if push_to_hub:
print('''Pushing to the hub...''' )
model.push_to_hub(__a ,organization='''nielsr''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a__ = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 14 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> int:
_a : Dict = '''ZinengTang/tvlt-base'''
_a : List[str] = tempfile.mkdtemp()
def __lowercase ( self , **_a ) -> int:
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self , **_a ) -> List[Any]:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Dict:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_feature_extractor()
_a : Optional[int] = TvltProcessor(image_processor=_a , feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Any = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _a )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Optional[Any] = self.get_image_processor()
_a : Dict = self.get_feature_extractor()
_a : Dict = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : Union[str, Any] = np.ones([1_2_0_0_0] )
_a : Dict = feature_extractor(_a , return_tensors='''np''' )
_a : Tuple = processor(audio=_a , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> int:
_a : Optional[Any] = self.get_image_processor()
_a : Union[str, Any] = self.get_feature_extractor()
_a : Optional[Any] = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : List[Any] = np.ones([3, 2_2_4, 2_2_4] )
_a : int = image_processor(_a , return_tensors='''np''' )
_a : Optional[int] = processor(images=_a , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> Union[str, Any]:
_a : int = self.get_image_processor()
_a : Union[str, Any] = self.get_feature_extractor()
_a : Any = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : List[str] = np.ones([1_2_0_0_0] )
_a : Optional[int] = np.ones([3, 2_2_4, 2_2_4] )
_a : int = processor(audio=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __lowercase ( self ) -> Union[str, Any]:
_a : str = self.get_image_processor()
_a : Union[str, Any] = self.get_feature_extractor()
_a : Dict = TvltProcessor(image_processor=_a , feature_extractor=_a )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 14 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 14 |
def __UpperCAmelCase ( __a : str ) -> list:
"""simple docstring"""
if n_term == "":
return []
_a : list = []
for temp in range(int(__a ) ):
series.append(F"""1/{temp + 1}""" if series else '''1''' )
return series
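# Illustrative check (not part of the original file): harmonic_series("5") should
# return ['1', '1/2', '1/3', '1/4', '1/5'], since the n-th term of the series is 1/n
# and the first term is rendered as '1'.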
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
    print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
    print(harmonic_series(nth_term))
| 14 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
a__ = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys( s_dict ):
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = R'''.*/layers_(\d+)'''
        new_key = key
        if re.match(layer_to_block_of_layer ,key ):
            new_key = re.sub(R'''layers_(\d+)''' ,R'''block/\1/layer''' ,new_key )
        layer_to_block_of_layer = R'''(encoder|decoder)\/'''
        if re.match(layer_to_block_of_layer ,key ):
            groups = re.match(layer_to_block_of_layer ,new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R'''/mlp/''' ,R'''/1/mlp/''' ,new_key )
                new_key = re.sub(R'''/pre_mlp_layer_norm/''' ,R'''/1/layer_norm/''' ,new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R'''/mlp/''' ,R'''/2/mlp/''' ,new_key )
                new_key = re.sub(R'''/pre_mlp_layer_norm/''' ,R'''/2/layer_norm/''' ,new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key ,temp_key )
        print(F"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_a : Optional[int] = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_a : str = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace('''expert/''' ,F"""experts/expert_{idx}/""" )] = expert_weihts[idx]
                print(F"""{key} -> {key.replace('expert/' ,F'experts/expert_{idx}/' )}""" )
            s_dict.pop(key )
return s_dict
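# Illustrative walk-through of the renaming above (key layout assumed from the
# MOE_LAYER_NAME_MAPPING entries): a T5X key such as
#   "encoder/layers_3/attention/query/kernel"
# first becomes "encoder/block/3/layer/attention/query/kernel" via the layers_(\d+)
# rewrite, and then "encoder/block/3/layer/0/SelfAttention/q/kernel" once the
# '/attention/' and 'query' substitutions from the mapping table are applied.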
a__ = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
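# For example (values hypothetical): a gin line `NUM_HEADS = 12` is captured by the
# `(.*) = ([0-9.]*)` regex in the parser below and stored as the config kwarg
# num_heads=12, while `HEAD_DIM = 64` becomes d_kv=64.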
def convert_gin_to_config( gin_file , num_experts ):
    """simple docstring"""
    import regex as re
    with open(gin_file ,'''r''' ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R'''(.*) = ([0-9.]*)''' ,raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if '''.''' in value else int(value )
    activation = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' ,raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args['''num_experts'''] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path , config_name=None , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    """simple docstring"""
    print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file ,num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_name )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params['''target''']
    flax_params = flatten_dict(flax_params ,sep='''/''' )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params ,sep='''/''' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model ,flax_params )
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
a__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 14 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params , i , prefix ):
"""simple docstring"""
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def tax_attention_lookup( params , i , prefix , layer_name="attention" ):
    """simple docstring"""
    k_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
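# Note on the reshapes above: after selecting layer i, the key/query/value kernels
# are flattened to 2-D (model_dim, n_heads * head_dim) and the output kernel to
# (n_heads * head_dim, model_dim), matching the 2-D weight matrices the PyTorch
# T5 attention modules expect (the .T transposes happen later in the caller).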
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
        wi_1 = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
    wo = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
    return wi, wo
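# (For v1.1-style checkpoints, wi_0/wi_1 are the two branches of the gated
# activation -- one branch gates, the other projects -- which is why the caller
# stores them as separate DenseReluDense weights when split_mlp_wi is True.)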
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
"""simple docstring"""
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def convert_tax_to_pytorch( variables : dict ,*, num_layers : int ,is_encoder_only : bool ,scalable_attention : bool = False ):
    """simple docstring"""
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' ,split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old ,i ,'''encoder''' ,'''pre_attention_layer_norm''' )
        k , o , q , v = tax_attention_lookup(old ,i ,'''encoder''' ,'''attention''' )
        new[F"""encoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
        new[F"""encoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old ,i ,'''encoder''' ,'''pre_mlp_layer_norm''' )
        wi , wo = tax_mlp_lookup(old ,i ,'''encoder''' ,split_mlp_wi )
        new[F"""encoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
        if split_mlp_wi:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"""] = wi[0].T
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"""] = wi[1].T
        else:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi.weight"""] = wi.T
        new[F"""encoder.block.{i}.layer.1.DenseReluDense.wo.weight"""] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F"""encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(
                old ,i ,'''encoder''' ).T
    new['''encoder.final_layer_norm.weight'''] = old['''encoder/encoder_norm/scale''']
    if not scalable_attention:
        new['''encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
            old ,0 ,'''encoder''' ).T
        new['''decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
            old ,0 ,'''decoder''' ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
            k , o , q , v = tax_attention_lookup(old ,i ,'''decoder''' ,'''self_attention''' )
            new[F"""decoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
            k , o , q , v = tax_attention_lookup(old ,i ,'''decoder''' ,'''encoder_decoder_attention''' )
            new[F"""decoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.v.weight"""] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_mlp_layer_norm''' )
            wi , wo = tax_mlp_lookup(old ,i ,'''decoder''' ,split_mlp_wi )
            new[F"""decoder.block.{i}.layer.2.layer_norm.weight"""] = layer_norm
            if split_mlp_wi:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"""] = wi[0].T
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"""] = wi[1].T
            else:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi.weight"""] = wi.T
            new[F"""decoder.block.{i}.layer.2.DenseReluDense.wo.weight"""] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F"""decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(old ,i ,'''decoder''' ).T
        new['''decoder.final_layer_norm.weight'''] = old['''decoder/decoder_norm/scale''']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['''lm_head.weight'''] = old['''decoder/logits_dense/kernel'''].T
    return new
def make_state_dict( converted_params , is_encoder_only : bool ):
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']
    return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    """simple docstring"""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables ,num_layers=config.num_layers ,is_encoder_only=is_encoder_only ,scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted ,is_encoder_only )
    model.load_state_dict(state_dict ,strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only : bool = False , scalable_attention : bool = False ):
    """simple docstring"""
    config = MTaConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model ,config ,tax_checkpoint_path ,is_encoder_only ,scalable_attention )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('''Done''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
a__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 14 | 1 |
def naive_pattern_search( s : str ,pattern : str ) -> list:
    """simple docstring"""
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
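# Naive matching re-checks every window from scratch, so the worst-case cost is
# O(len(s) * len(pattern)); algorithms such as KMP avoid this by reusing partial matches.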
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 14 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = '''Usage of script: script_name <size_of_canvas:int>'''
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas( size : int ) -> list[list[bool]]:
    """simple docstring"""
    canvas = [[False for i in range(size )] for j in range(size )]
    return canvas
def seed( canvas : list[list[bool]] ) -> None:
    """simple docstring"""
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )
def run( canvas : list[list[bool]] ) -> list[list[bool]]:
    """simple docstring"""
    current_canvas = np.array(canvas )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            next_gen_canvas[r][c] = __judge_point(
                pt ,current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas : list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point( pt : bool ,neighbours : list[list[bool]] ) -> bool:
    """simple docstring"""
    dead = 0
    alive = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
return state
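# The branches above are Conway's rules: a live cell dies with fewer than two live
# neighbours (underpopulation) or more than three (overpopulation), survives with
# two or three, and a dead cell with exactly three live neighbours becomes alive.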
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig , ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['''w''', '''k'''])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 14 | 1 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = LxmertTokenizer
UpperCAmelCase__ : Optional[Any] = LxmertTokenizerFast
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Dict = True
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
_a : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowercase ( self , _a ) -> List[str]:
_a : Tuple = '''UNwant\u00E9d,running'''
_a : str = '''unwanted, running'''
return input_text, output_text
def __lowercase ( self ) -> List[Any]:
_a : str = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 1_0, 8, 9] )
def __lowercase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
_a : Optional[Any] = self.get_tokenizer()
_a : str = self.get_rust_tokenizer()
_a : Optional[Any] = '''I was born in 92000, and this is falsé.'''
_a : Optional[Any] = tokenizer.tokenize(_a )
_a : List[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
_a : Any = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Dict = self.get_rust_tokenizer()
_a : Optional[int] = tokenizer.encode(_a )
_a : Dict = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
| 14 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "funnel"
UpperCAmelCase__ : Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self , _a=3_0_5_2_2 , _a=[4, 4, 4] , _a=None , _a=2 , _a=7_6_8 , _a=1_2 , _a=6_4 , _a=3_0_7_2 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1e-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ) -> List[Any]:
_a : Optional[int] = vocab_size
_a : Dict = block_sizes
_a : Optional[int] = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_a : int = num_decoder_layers
_a : List[str] = d_model
_a : Optional[Any] = n_head
_a : Tuple = d_head
_a : Dict = d_inner
_a : List[str] = hidden_act
_a : int = hidden_dropout
_a : Union[str, Any] = attention_dropout
_a : Tuple = activation_dropout
_a : Optional[Any] = initializer_range
_a : Dict = initializer_std
_a : Union[str, Any] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
_a : Any = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
_a : Optional[Any] = attention_type
_a : int = separate_cls
_a : Tuple = truncate_seq
_a : List[Any] = pool_q_only
super().__init__(**_a )
@property
    def num_hidden_layers( self ) -> int:
return sum(self.block_sizes )
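    # (Derived value: with the default block_sizes=[4, 4, 4] this reports 12 hidden layers.)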
@num_hidden_layers.setter
    def num_hidden_layers( self , _a ) -> None:
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
    def num_blocks( self ) -> int:
return len(self.block_sizes )
@num_blocks.setter
    def num_blocks( self , _a ) -> None:
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 14 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = '''ylacombe/bark-small'''
_a : Dict = tempfile.mkdtemp()
_a : int = '''en_speaker_1'''
_a : Dict = '''This is a test string'''
_a : int = '''speaker_embeddings_path.json'''
_a : List[Any] = '''speaker_embeddings'''
def __lowercase ( self , **_a ) -> Dict:
return AutoTokenizer.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : int = self.get_tokenizer()
_a : Optional[Any] = BarkProcessor(tokenizer=_a )
processor.save_pretrained(self.tmpdirname )
_a : Optional[Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __lowercase ( self ) -> List[Any]:
_a : str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_a : int = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : int = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __lowercase ( self ) -> Optional[int]:
_a : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_a : Optional[int] = 3_5
_a : str = 2
_a : Dict = 8
_a : Any = {
'''semantic_prompt''': np.ones(_a ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
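        # (Shapes mirror Bark voice presets: the semantic prompt is 1-D over time steps,
        # the coarse/fine prompts are (num_codebooks, seq_len); np.ones stands in for
        # real embedding values.)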
# test providing already loaded voice_preset
_a : int = processor(text=self.input_string , voice_preset=_a )
_a : Optional[Any] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_a , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_a : str = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(_a , **_a )
_a : List[str] = processor(text=self.input_string , voice_preset=_a )
_a : List[str] = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_a , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_a : Dict = processor(text=self.input_string , voice_preset=self.voice_preset )
def __lowercase ( self ) -> str:
_a : int = self.get_tokenizer()
_a : Any = BarkProcessor(tokenizer=_a )
_a : int = processor(text=self.input_string )
_a : Optional[Any] = tokenizer(
self.input_string , padding='''max_length''' , max_length=2_5_6 , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 14 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : int = "mobilenet_v1"
def __init__( self , _a=3 , _a=2_2_4 , _a=1.0 , _a=8 , _a="relu6" , _a=True , _a=0.999 , _a=0.02 , _a=0.001 , **_a , ) -> List[Any]:
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_a : Tuple = num_channels
_a : str = image_size
_a : Tuple = depth_multiplier
_a : Any = min_depth
_a : int = hidden_act
_a : Optional[Any] = tf_padding
_a : str = classifier_dropout_prob
_a : Optional[int] = initializer_range
_a : Any = layer_norm_eps
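        # depth_multiplier is MobileNet's width multiplier: it scales every layer's
        # channel count (e.g. 0.75 turns a 32-channel layer into 24 channels), with
        # min_depth acting as the lower bound.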
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : str = version.parse("1.11" )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
    def atol_for_validation( self ) -> float:
return 1e-4
| 14 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a__ = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
a__ = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
a__ = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute( self , predictions , references , max_order=4 , smooth=False ):
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 14 |
NUMBERS_PLUS_LETTER = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def is_spain_national_id( spanish_id : str ) -> bool:
    """simple docstring"""
    if not isinstance(spanish_id ,str ):
        msg = F"""Expected string as input, found {type(spanish_id ).__name__}"""
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace('''-''' ,'''''' ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER ) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER )
    return letter == LOOKUP_LETTERS[number % 23]
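# Worked example (illustrative): for "12345678Z", 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == 'Z', so the ID validates.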
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""
    data : float
    left : TreeNode | None = None
    right : TreeNode | None = None
def is_binary_search_tree( tree : TreeNode | None ) -> bool:
    """simple docstring"""
    def is_valid_tree(node : TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(node ,TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(tree ):
        raise ValueError(
            '''Each node should be type of TreeNode and data should be float.''' )
    def is_binary_search_tree_recursive_check(
        node : TreeNode | None ,left_bound : float ,right_bound : float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left ,left_bound ,node.data )
            and is_binary_search_tree_recursive_check(
                node.right ,node.data ,right_bound )
        )
    return is_binary_search_tree_recursive_check(tree ,-float('''inf''' ) ,float('''inf''' ) )
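# Sketch of expected behaviour (hand-built nodes, illustrative only):
#   is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))  # True
#   is_binary_search_tree(TreeNode(2.0, TreeNode(3.0), TreeNode(1.0)))  # False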
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort( a ,start ,end ):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start ,end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a ,start ,end )
        count += _in_place_quick_sort(a ,start ,p - 1 )
        count += _in_place_quick_sort(a ,p + 1 ,end )
    return count
def _in_place_partition( a ,start ,end ):
    """simple docstring"""
    count = 0
    pivot = randint(start ,end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start ,end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
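# Minimal usage sketch (illustrative values):
#   data = [3, 1, 2]
#   comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#   # data is now [1, 2, 3]; `comparisons` counts the element comparisons made.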
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu , sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution'''
    ''' is :'''
)
print(z)
| 14 | 1 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats( url : str = "https://www.worldometers.info/coronavirus" ) -> dict:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url ).text ,'''html.parser''' )
    keys = soup.findAll('''h1''' )
    values = soup.findAll('''div''' ,{'''class''': '''maincounter-number'''} )
    keys += soup.findAll('''span''' ,{'''class''': '''panel-title'''} )
    values += soup.findAll('''div''' ,{'''class''': '''number-table-main'''} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys ,values )}
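# The dict comprehension above pairs scraped titles with counters purely by
# position, so it assumes the page keeps headings and counter divs in the same order.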
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 14 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = MgpstrTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Any:
super().setUp()
# fmt: off
_a : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_a : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
def __lowercase ( self , **_a ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Tuple:
_a : List[str] = '''tester'''
_a : Optional[Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_a : Tuple = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_a : Tuple = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a , _a : int = self.get_input_output_texts(_a )
_a : List[str] = tokenizer.tokenize(_a )
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_a : int = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __lowercase ( self ) -> Optional[Any]:
pass
| 14 | 1 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = DownBlockaD # noqa F405
UpperCAmelCase__ : List[Any] = "down"
def __lowercase ( self ) -> Tuple:
_a : List[Any] = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = ResnetDownsampleBlockaD # noqa F405
UpperCAmelCase__ : Union[str, Any] = "down"
def __lowercase ( self ) -> Optional[Any]:
_a : str = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = AttnDownBlockaD # noqa F405
UpperCAmelCase__ : List[Any] = "down"
def __lowercase ( self ) -> List[Any]:
_a : str = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = CrossAttnDownBlockaD # noqa F405
UpperCAmelCase__ : Any = "down"
def __lowercase ( self ) -> Optional[int]:
_a , _a : List[Any] = super().prepare_init_args_and_inputs_for_common()
_a : List[Any] = 3_2
return init_dict, inputs_dict
def __lowercase ( self ) -> Union[str, Any]:
_a : Dict = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = SimpleCrossAttnDownBlockaD # noqa F405
UpperCAmelCase__ : Optional[Any] = "down"
@property
def __lowercase ( self ) -> List[str]:
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __lowercase ( self ) -> int:
_a , _a : List[Any] = super().prepare_init_args_and_inputs_for_common()
_a : int = 3_2
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __lowercase ( self ) -> Union[str, Any]:
_a : str = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = SkipDownBlockaD # noqa F405
UpperCAmelCase__ : Optional[Any] = "down"
@property
def __lowercase ( self ) -> Any:
return super().get_dummy_input(include_skip_sample=_a )
def __lowercase ( self ) -> Dict:
_a : List[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = AttnSkipDownBlockaD # noqa F405
UpperCAmelCase__ : Any = "down"
@property
def __lowercase ( self ) -> Tuple:
return super().get_dummy_input(include_skip_sample=_a )
def __lowercase ( self ) -> Optional[int]:
_a : int = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = DownEncoderBlockaD # noqa F405
UpperCAmelCase__ : List[str] = "down"
@property
def __lowercase ( self ) -> Union[str, Any]:
return super().get_dummy_input(include_temb=_a )
def __lowercase ( self ) -> int:
_a : List[Any] = {
'''in_channels''': 3_2,
'''out_channels''': 3_2,
}
_a : List[str] = self.dummy_input
return init_dict, inputs_dict
def __lowercase ( self ) -> Tuple:
_a : int = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = AttnDownEncoderBlockaD # noqa F405
UpperCAmelCase__ : int = "down"
@property
def __lowercase ( self ) -> int:
return super().get_dummy_input(include_temb=_a )
def __lowercase ( self ) -> Tuple:
_a : Union[str, Any] = {
'''in_channels''': 3_2,
'''out_channels''': 3_2,
}
_a : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[int] = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = UNetMidBlockaD # noqa F405
UpperCAmelCase__ : Tuple = "mid"
def __lowercase ( self ) -> str:
_a : Any = {
'''in_channels''': 3_2,
'''temb_channels''': 1_2_8,
}
_a : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def __lowercase ( self ) -> Any:
_a : str = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = UNetMidBlockaDCrossAttn # noqa F405
UpperCAmelCase__ : int = "mid"
def __lowercase ( self ) -> Union[str, Any]:
_a , _a : List[str] = super().prepare_init_args_and_inputs_for_common()
_a : Dict = 3_2
return init_dict, inputs_dict
def __lowercase ( self ) -> Tuple:
_a : Dict = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = UNetMidBlockaDSimpleCrossAttn # noqa F405
UpperCAmelCase__ : Tuple = "mid"
@property
def __lowercase ( self ) -> Union[str, Any]:
return super().get_dummy_input(include_encoder_hidden_states=_a )
def __lowercase ( self ) -> int:
_a , _a : Dict = super().prepare_init_args_and_inputs_for_common()
_a : Optional[int] = 3_2
return init_dict, inputs_dict
def __lowercase ( self ) -> List[Any]:
_a : Optional[int] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = UpBlockaD # noqa F405
UpperCAmelCase__ : Any = "up"
@property
def __lowercase ( self ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __lowercase ( self ) -> Tuple:
_a : Any = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ResnetUpsampleBlockaD # noqa F405
UpperCAmelCase__ : Tuple = "up"
@property
def __lowercase ( self ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __lowercase ( self ) -> Union[str, Any]:
_a : List[Any] = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = CrossAttnUpBlockaD # noqa F405
UpperCAmelCase__ : List[str] = "up"
@property
def __lowercase ( self ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __lowercase ( self ) -> Dict:
_a , _a : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
_a : List[str] = 3_2
return init_dict, inputs_dict
def __lowercase ( self ) -> Union[str, Any]:
_a : int = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405
UpperCAmelCase__ : List[Any] = "up"
@property
def __lowercase ( self ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=_a , include_encoder_hidden_states=_a )
def __lowercase ( self ) -> int:
_a , _a : int = super().prepare_init_args_and_inputs_for_common()
_a : Dict = 3_2
return init_dict, inputs_dict
def __lowercase ( self ) -> Tuple:
_a : Tuple = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = AttnUpBlockaD # noqa F405
UpperCAmelCase__ : Dict = "up"
@property
def __lowercase ( self ) -> str:
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = SkipUpBlockaD # noqa F405
UpperCAmelCase__ : int = "up"
@property
def __lowercase ( self ) -> Any:
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __lowercase ( self ) -> str:
_a : Optional[Any] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = AttnSkipUpBlockaD # noqa F405
UpperCAmelCase__ : Optional[int] = "up"
@property
def __lowercase ( self ) -> Dict:
return super().get_dummy_input(include_res_hidden_states_tuple=_a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = UpDecoderBlockaD # noqa F405
UpperCAmelCase__ : str = "up"
@property
def __lowercase ( self ) -> Optional[int]:
return super().get_dummy_input(include_temb=_a )
def __lowercase ( self ) -> int:
_a : Any = {'''in_channels''': 3_2, '''out_channels''': 3_2}
_a : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __lowercase ( self ) -> List[str]:
_a : int = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(_a )
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = AttnUpDecoderBlockaD # noqa F405
UpperCAmelCase__ : Optional[Any] = "up"
@property
def __lowercase ( self ) -> Union[str, Any]:
return super().get_dummy_input(include_temb=_a )
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = {'''in_channels''': 3_2, '''out_channels''': 3_2}
_a : List[str] = self.dummy_input
return init_dict, inputs_dict
def __lowercase ( self ) -> str:
_a : Union[str, Any] = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(_a )
| 14 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
_a : int = 0
def __lowercase ( self ) -> List[str]:
_a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : Optional[Any] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
_a : Any = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Dict = CLIPConfig()
# Create a dummy config file with image_proceesor_type
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : List[str] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_a : Tuple = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_a : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Any:
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
_a : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowercase ( self ) -> List[Any]:
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : List[str] = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def __lowercase ( self ) -> Dict:
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_a : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowercase ( self ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_a : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
_a : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowercase ( self ) -> Dict:
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = Path(_a ) / '''preprocessor_config.json'''
_a : int = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : int = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowercase ( self ) -> Union[str, Any]:
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_a : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_a : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 14 | 1 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_0_0 , _a=1_3 , _a=3_0 , _a=2 , _a=3 , _a=True , _a=True , _a=3_2 , _a=4 , _a=4 , _a=3_7 , _a="gelu" , _a=0.1 , _a=0.1 , _a=1_0 , _a=0.02 , _a=3 , _a=None , _a=[0, 1, 2, 3] , ) -> List[str]:
_a : List[str] = parent
_a : Tuple = 1_0_0
_a : Tuple = batch_size
_a : List[str] = image_size
_a : List[str] = patch_size
_a : Dict = num_channels
_a : List[str] = is_training
_a : Union[str, Any] = use_labels
_a : str = hidden_size
_a : int = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : List[str] = intermediate_size
_a : Any = hidden_act
_a : Optional[Any] = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : str = type_sequence_label_size
_a : Optional[Any] = initializer_range
_a : Union[str, Any] = scope
_a : Dict = out_indices
_a : Dict = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Union[str, Any] = (image_size // patch_size) ** 2
_a : int = num_patches + 1
def __lowercase ( self ) -> Tuple:
_a : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = None
_a : str = None
if self.use_labels:
_a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_a : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowercase ( self ) -> Any:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __lowercase ( self , _a , _a , _a , _a ) -> Union[str, Any]:
_a : List[str] = BeitModel(config=_a )
model.to(_a )
model.eval()
_a : List[Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self , _a , _a , _a , _a ) -> Optional[int]:
_a : int = BeitForMaskedImageModeling(config=_a )
model.to(_a )
model.eval()
_a : Dict = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __lowercase ( self , _a , _a , _a , _a ) -> List[str]:
_a : str = self.type_sequence_label_size
_a : Dict = BeitForImageClassification(_a )
model.to(_a )
model.eval()
_a : Union[str, Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : Tuple = 1
_a : Optional[int] = BeitForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Any = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self , _a , _a , _a , _a ) -> List[str]:
_a : Optional[int] = self.num_labels
_a : List[Any] = BeitForSemanticSegmentation(_a )
model.to(_a )
model.eval()
_a : List[Any] = model(_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __lowercase ( self ) -> Tuple:
_a : Optional[int] = self.prepare_config_and_inputs()
_a , _a , _a , _a : Tuple = config_and_inputs
_a : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Tuple = False
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = BeitModelTester(self )
_a : Any = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=3_7 )
def __lowercase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def __lowercase ( self ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
def __lowercase ( self ) -> List[str]:
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Dict = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __lowercase ( self ) -> List[str]:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Tuple = model_class(_a )
_a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Union[str, Any] = [*signature.parameters.keys()]
_a : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> Dict:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Dict:
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def __lowercase ( self ) -> Dict:
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __lowercase ( self ) -> Optional[Any]:
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
def __lowercase ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
_a : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_a ), BeitForMaskedImageModeling]:
continue
_a : Optional[int] = model_class(_a )
model.to(_a )
model.train()
_a : Any = self._prepare_for_class(_a , _a , return_labels=_a )
_a : Optional[int] = model(**_a ).loss
loss.backward()
def __lowercase ( self ) -> Tuple:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_a : Tuple = False
_a : Optional[int] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_a ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_a : Optional[Any] = model_class(_a )
model.gradient_checkpointing_enable()
model.to(_a )
model.train()
_a : Optional[Any] = self._prepare_for_class(_a , _a , return_labels=_a )
_a : str = model(**_a ).loss
loss.backward()
def __lowercase ( self ) -> Union[str, Any]:
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def __lowercase ( self ) -> Any:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : str = BeitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> Optional[int]:
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Optional[int]:
_a : int = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(_a )
_a : Any = self.default_image_processor
_a : Tuple = prepare_img()
_a : List[str] = image_processor(images=_a , return_tensors='''pt''' ).pixel_values.to(_a )
# prepare bool_masked_pos
_a : List[Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(_a )
# forward pass
with torch.no_grad():
_a : Dict = model(pixel_values=_a , bool_masked_pos=_a )
_a : Optional[Any] = outputs.logits
# verify the logits
_a : List[str] = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , _a )
_a : Optional[Any] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(_a )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _a , atol=1e-2 ) )
@slow
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(_a )
_a : Tuple = self.default_image_processor
_a : Optional[Any] = prepare_img()
_a : Tuple = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[Any] = model(**_a )
_a : Optional[Any] = outputs.logits
# verify the logits
_a : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , _a )
_a : Optional[Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(_a )
self.assertTrue(torch.allclose(logits[0, :3] , _a , atol=1e-4 ) )
_a : List[str] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , _a )
@slow
def __lowercase ( self ) -> Optional[Any]:
_a : int = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
_a )
_a : Dict = self.default_image_processor
_a : Optional[Any] = prepare_img()
_a : Optional[Any] = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Any = model(**_a )
_a : Optional[int] = outputs.logits
# verify the logits
_a : Optional[Any] = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , _a )
_a : Optional[int] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(_a )
self.assertTrue(torch.allclose(logits[0, :3] , _a , atol=1e-4 ) )
_a : Dict = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , _a )
@slow
def __lowercase ( self ) -> str:
_a : List[str] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
_a : Union[str, Any] = model.to(_a )
_a : Union[str, Any] = BeitImageProcessor(do_resize=_a , size=6_4_0 , do_center_crop=_a )
_a : Optional[int] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
_a : List[Any] = Image.open(ds[0]['''file'''] )
_a : Tuple = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Dict = model(**_a )
_a : str = outputs.logits
# verify the logits
_a : int = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , _a )
_a : str = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
_a : List[str] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=_a , )
else:
_a : Any = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1e-4 ) )
@slow
def __lowercase ( self ) -> Optional[Any]:
_a : str = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
_a : Dict = model.to(_a )
_a : int = BeitImageProcessor(do_resize=_a , size=6_4_0 , do_center_crop=_a )
_a : Optional[Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
_a : List[str] = Image.open(ds[0]['''file'''] )
_a : Optional[int] = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : str = model(**_a )
_a : Union[str, Any] = outputs.logits.detach().cpu()
_a : str = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(5_0_0, 3_0_0)] )
_a : Dict = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , _a )
_a : str = image_processor.post_process_semantic_segmentation(outputs=_a )
_a : Tuple = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , _a )
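# The two post_process_semantic_segmentation calls above differ only in
# target_sizes: passing it resizes each predicted map to the requested
# (height, width), while omitting it keeps the model's native logits
# resolution (160 x 160 for these 640 x 640 inputs).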
| 14 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCAmelCase__ : float
UpperCAmelCase__ : TreeNode | None = None
UpperCAmelCase__ : TreeNode | None = None
def __UpperCAmelCase ( __a : TreeNode | None ) -> bool:
"""simple docstring"""
def is_valid_tree(__a : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(__a ,__a ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(__a ):
raise ValueError(
'''Each node should be of type TreeNode and its data should be a float.''' )
def is_binary_search_tree_recursive_check(
__a : TreeNode | None ,__a : float ,__a : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left ,__a ,node.data )
and is_binary_search_tree_recursive_check(
node.right ,node.data ,__a )
)
return is_binary_search_tree_recursive_check(__a ,-float('''inf''' ) ,float('''inf''' ) )
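# Usage sketch (illustrative; written against the intended public names
# TreeNode(data, left, right) and is_binary_search_tree):
#     valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#     invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))  # left child > root
#     is_binary_search_tree(valid)    # True
#     is_binary_search_tree(invalid)  # False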
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 1 |
from __future__ import annotations
import queue
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a ) -> Union[str, Any]:
_a : Optional[Any] = data
_a : Dict = None
_a : Any = None
def __UpperCAmelCase ( ) -> TreeNode:
"""simple docstring"""
print('''\n********Press N to stop entering at any point in time********\n''' )
_a : Optional[Any] = input('''Enter the value of the root node: ''' ).strip().lower()
_a : queue.Queue = queue.Queue()
_a : Any = TreeNode(int(__a ) )
q.put(__a )
while not q.empty():
_a : List[str] = q.get()
_a : Any = F"""Enter the left node of {node_found.data}: """
_a : str = input(__a ).strip().lower() or '''n'''
if check == "n":
return tree_node
_a : str = TreeNode(int(__a ) )
_a : int = left_node
q.put(__a )
_a : Union[str, Any] = F"""Enter the right node of {node_found.data}: """
_a : List[str] = input(__a ).strip().lower() or '''n'''
if check == "n":
return tree_node
_a : str = TreeNode(int(__a ) )
_a : List[str] = right_node
q.put(__a )
# unreachable in normal use: every dequeued node either returns the tree or
# enqueues both of its children, so the queue never drains
raise RuntimeError('''tree construction ended without returning a root node''' )
def __UpperCAmelCase ( __a : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__a ,__a ) or not node:
return
print(node.data ,end=''',''' )
pre_order(node.left )
pre_order(node.right )
def __UpperCAmelCase ( __a : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__a ,__a ) or not node:
return
in_order(node.left )
print(node.data ,end=''',''' )
in_order(node.right )
def __UpperCAmelCase ( __a : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__a ,__a ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data ,end=''',''' )
def __UpperCAmelCase ( __a : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__a ,__a ) or not node:
return
_a : queue.Queue = queue.Queue()
q.put(__a )
while not q.empty():
_a : Tuple = q.get()
print(node_dequeued.data ,end=''',''' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __UpperCAmelCase ( __a : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__a ,__a ) or not node:
return
_a : queue.Queue = queue.Queue()
q.put(__a )
while not q.empty():
_a : Tuple = []
while not q.empty():
_a : List[Any] = q.get()
print(node_dequeued.data ,end=''',''' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__a )
def __UpperCAmelCase ( __a : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__a ,__a ) or not node:
return
_a : list[TreeNode] = []
_a : List[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data ,end=''',''' )
stack.append(__a )
_a : List[Any] = n.left
# end of while means current node doesn't have left child
_a : str = stack.pop()
# start to traverse its right child
_a : List[str] = n.right
def __UpperCAmelCase ( __a : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__a ,__a ) or not node:
return
_a : list[TreeNode] = []
_a : Tuple = node
while n or stack:
while n:
stack.append(__a )
_a : Any = n.left
_a : Tuple = stack.pop()
print(n.data ,end=''',''' )
_a : int = n.right
def __UpperCAmelCase ( __a : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(__a ,__a ) or not node:
return
_a , _a : List[str] = [], []
_a : List[str] = node
stacka.append(__a )
while stacka: # to find the reversed order of post order, store it in stack2
_a : Optional[int] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__a )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data ,end=''',''' )
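# Why the two-stack trick yields post order (worked example): for a root 1
# with children 2 and 3, stack1 pops 1, then 3, then 2 while stack2 collects
# [1, 3, 2]; popping stack2 then prints 2, 3, 1 -- left, right, root.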
def __UpperCAmelCase ( __a : str = "" ,__a : Optional[int]=50 ,__a : List[Any]="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
_a , _a : Any = divmod(width - len(__a ) - 2 ,2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
a__ = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 14 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
a__ = numpy.array([0, 0])
a__ = numpy.array([0.5, 0.8660254])
a__ = numpy.array([1, 0])
a__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def __UpperCAmelCase ( __a : list[numpy.ndarray] ,__a : int ) -> list[numpy.ndarray]:
"""simple docstring"""
_a : Tuple = initial_vectors
for _ in range(__a ):
_a : int = iteration_step(__a )
return vectors
def __UpperCAmelCase ( __a : list[numpy.ndarray] ) -> list[numpy.ndarray]:
"""simple docstring"""
_a : Tuple = []
for i, start_vector in enumerate(vectors[:-1] ):
_a : str = vectors[i + 1]
new_vectors.append(__a )
_a : Optional[int] = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 ,60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def __UpperCAmelCase ( __a : numpy.ndarray ,__a : float ) -> numpy.ndarray:
"""simple docstring"""
_a : Tuple = numpy.radians(__a )
_a , _a : List[Any] = numpy.cos(__a ), numpy.sin(__a )
_a : Dict = numpy.array(((c, -s), (s, c)) )
return numpy.dot(__a ,__a )
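# Self-contained sanity check of the rotation matrix used above (illustrative,
# not part of the original script): rotating the unit vector (1, 0) by
# 90 degrees should give (0, 1) up to floating point error.
def _rotation_sanity_check() -> bool:
    theta = numpy.radians(90)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return bool(numpy.allclose(numpy.dot(rotation_matrix, numpy.array([1, 0])), [0, 1]))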
def __UpperCAmelCase ( __a : list[numpy.ndarray] ) -> None:
"""simple docstring"""
_a : str = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_a , _a : Optional[int] = zip(*__a )
plt.plot(__a ,__a )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 14 | 1 |
import warnings
from .generation import TFGenerationMixin
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
warnings.warn(
"Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
"be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , __lowercase , )
| 14 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase ( __a : Tuple ,__a : Dict ,__a : List[str] ,__a : Optional[Any] ,__a : Tuple ) -> Dict:
"""simple docstring"""
with open(__a ) as metadata_file:
_a : Optional[Any] = json.load(__a )
_a : List[Any] = LukeConfig(use_entity_aware_attention=__a ,**metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_a : Optional[Any] = torch.load(__a ,map_location='''cpu''' )['''module''']
# Load the entity vocab file
_a : Any = load_original_entity_vocab(__a )
# add an entry for [MASK2]
_a : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_a : Dict = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_a : Optional[int] = AddedToken('''<ent>''' ,lstrip=__a ,rstrip=__a )
_a : Tuple = AddedToken('''<ent2>''' ,lstrip=__a ,rstrip=__a )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__a )
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''r''' ) as f:
_a : List[str] = json.load(__a )
_a : Tuple = '''MLukeTokenizer'''
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''w''' ) as f:
json.dump(__a ,__a )
with open(os.path.join(__a ,MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) ,'''w''' ) as f:
json.dump(__a ,__a )
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
# Initialize the embeddings of the special tokens
_a : str = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_a : Tuple = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_a : Any = state_dict['''embeddings.word_embeddings.weight''']
_a : Optional[int] = word_emb[ent_init_index].unsqueeze(0 )
_a : Any = word_emb[enta_init_index].unsqueeze(0 )
_a : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_a : Tuple = state_dict[bias_name]
_a : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_a : Optional[int] = decoder_bias[enta_init_index].unsqueeze(0 )
_a : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_a : Tuple = F"""encoder.layer.{layer_index}.attention.self."""
_a : List[Any] = state_dict[prefix + matrix_name]
_a : Dict = state_dict[prefix + matrix_name]
_a : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_a : Union[str, Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
_a : Optional[int] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_a : int = state_dict['''entity_predictions.bias''']
_a : int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_a : Optional[int] = LukeForMaskedLM(config=__a ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_a : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_a : Optional[int] = state_dict[key]
else:
_a : Tuple = state_dict[key]
_a , _a : int = model.load_state_dict(__a ,strict=__a )
if set(__a ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a ,task='''entity_classification''' )
_a : int = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_a : List[Any] = (0, 9)
_a : Tuple = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : int = model(**__a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : List[str] = torch.Size((1, 33, 768) )
_a : Union[str, Any] = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : str = torch.Size((1, 1, 768) )
_a : List[Any] = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
_a : Dict = '''Tokyo is the capital of <mask>.'''
_a : List[str] = (24, 30)
_a : Optional[int] = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : Optional[Any] = model(**__a )
_a : Any = encoding['''input_ids'''][0].tolist()
_a : Optional[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_a : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__a )
_a : Any = outputs.entity_logits[0][0].argmax().item()
_a : Optional[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__a ) )
model.save_pretrained(__a )
def __UpperCAmelCase ( __a : List[Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
_a : int = [json.loads(__a ) for line in open(__a )]
_a : List[Any] = {}
for entry in data:
_a : int = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_a : List[Any] = entity_id
break
_a : Dict = F"""{language}:{entity_name}"""
_a : int = entity_id
return new_mapping
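# Expected entity-vocab file format (illustrative): one JSON object per line,
#     {"id": 0, "entities": [["[MASK]", "en"]]}
#     {"id": 1, "entities": [["Japan", "en"], ["日本", "ja"]]}
# which maps to {"[MASK]": 0, "en:Japan": 1, "ja:日本": 1} -- special tokens
# keep their bare name, everything else is keyed as "language:entity_name".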
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
a__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 14 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
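# The _LazyModule indirection defers the torch-backed imports declared above
# until an attribute is first accessed, keeping a plain `import transformers`
# cheap even when the Swin V2 models are never used.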
| 14 |
from scipy.stats import spearmanr
import datasets
a__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
a__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
a__ = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
def __lowercase ( self , _a , _a , _a=False ) -> str:
_a : int = spearmanr(_a , _a )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 14 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[Any] ,__a : List[str] ,__a : Tuple ) -> Dict:
"""simple docstring"""
_a : Optional[int] = FunnelConfig.from_json_file(__a )
print(F"""Building PyTorch model from configuration: {config}""" )
_a : Optional[Any] = FunnelBaseModel(__a ) if base_model else FunnelModel(__a )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(__a ,__a ,__a )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() ,__a )
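# Example invocation (script name and paths are illustrative):
#     python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path model.ckpt \
#         --config_file config.json \
#         --pytorch_dump_path pytorch_model.bin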
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
a__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 14 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __UpperCAmelCase ( __a : bytes ,__a : int ) -> np.array:
"""simple docstring"""
_a : int = F"""{sampling_rate}"""
_a : str = '''1'''
_a : Optional[int] = '''f32le'''
_a : Optional[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(__a ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
_a : Any = ffmpeg_process.communicate(__a )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
_a : Optional[Any] = output_stream[0]
_a : Optional[int] = np.frombuffer(__a ,np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
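# Usage sketch (illustrative; `sample.wav` is a hypothetical file and
# ffmpeg_read is the intended name of the reader above):
#     with open("sample.wav", "rb") as f:
#         audio = ffmpeg_read(f.read(), 16_000)
#     # -> 1-D float32 numpy array, mono, resampled to 16 kHz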
def __UpperCAmelCase ( __a : int ,__a : float ,__a : str = "f32le" ,) -> str:
"""simple docstring"""
_a : Dict = F"""{sampling_rate}"""
_a : Optional[Any] = '''1'''
if format_for_conversion == "s16le":
_a : Dict = 2
elif format_for_conversion == "f32le":
_a : Optional[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Dict = platform.system()
if system == "Linux":
_a : Dict = '''alsa'''
_a : Union[str, Any] = '''default'''
elif system == "Darwin":
_a : Union[str, Any] = '''avfoundation'''
_a : List[str] = ''':0'''
elif system == "Windows":
_a : Optional[int] = '''dshow'''
_a : str = '''default'''
_a : Tuple = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : str = _ffmpeg_stream(__a ,__a )
for item in iterator:
yield item
def __UpperCAmelCase ( __a : int ,__a : float ,__a : Optional[int] = None ,__a : Optional[Union[Tuple[float, float], float]] = None ,__a : str = "f32le" ,) -> Optional[int]:
"""simple docstring"""
if stream_chunk_s is not None:
_a : Tuple = stream_chunk_s
else:
_a : Tuple = chunk_length_s
_a : Tuple = ffmpeg_microphone(__a ,__a ,format_for_conversion=__a )
if format_for_conversion == "s16le":
_a : Any = np.intaa
_a : Optional[int] = 2
elif format_for_conversion == "f32le":
_a : Dict = np.floataa
_a : List[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_a : List[Any] = chunk_length_s / 6
_a : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__a ,(int, float) ):
_a : Optional[Any] = [stride_length_s, stride_length_s]
_a : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_a : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_a : Optional[Any] = datetime.datetime.now()
_a : Tuple = datetime.timedelta(seconds=__a )
for item in chunk_bytes_iter(__a ,__a ,stride=(stride_left, stride_right) ,stream=__a ):
# Put everything back in numpy scale
_a : Dict = np.frombuffer(item['''raw'''] ,dtype=__a )
_a : Dict = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
_a : str = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're running more than ten chunks behind real time; skip to catch up.
continue
yield item
def __UpperCAmelCase ( __a : Optional[int] ,__a : int ,__a : Tuple[int, int] ,__a : bool = False ) -> Optional[int]:
"""simple docstring"""
_a : Any = b''''''
_a , _a : List[str] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_a : List[str] = 0
for raw in iterator:
acc += raw
if stream and len(__a ) < chunk_len:
_a : Dict = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__a ) >= chunk_len:
# We are flushing the accumulator
_a : List[str] = (_stride_left, stride_right)
_a : List[Any] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
_a : List[Any] = False
yield item
_a : Optional[Any] = stride_left
_a : Optional[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__a ) > stride_left:
_a : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
_a : Dict = False
yield item
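# Striding example (illustrative): with chunk_len=6 bytes and stride=(2, 2),
# each yield advances the stream by chunk_len - stride_left - stride_right
# = 2 bytes, so consecutive chunks overlap by 4 bytes and downstream
# consumers can discard the strided edges without losing any audio.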
def __UpperCAmelCase ( __a : int ,__a : int ) -> Tuple:
"""simple docstring"""
_a : Dict = 2**24 # 16MB
try:
with subprocess.Popen(__a ,stdout=subprocess.PIPE ,bufsize=__a ) as ffmpeg_process:
while True:
_a : int = ffmpeg_process.stdout.read(__a )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 14 | 1 |
import argparse
import struct
import unittest
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a ) -> None:
_a : List[str] = data
# Initialize hash values
_a : int = [
0x6_a_0_9_e_6_6_7,
0xb_b_6_7_a_e_8_5,
0x3_c_6_e_f_3_7_2,
0xa_5_4_f_f_5_3_a,
0x5_1_0_e_5_2_7_f,
0x9_b_0_5_6_8_8_c,
0x1_f_8_3_d_9_a_b,
0x5_b_e_0_c_d_1_9,
]
# Initialize round constants
_a : Tuple = [
0x4_2_8_a_2_f_9_8,
0x7_1_3_7_4_4_9_1,
0xb_5_c_0_f_b_c_f,
0xe_9_b_5_d_b_a_5,
0x3_9_5_6_c_2_5_b,
0x5_9_f_1_1_1_f_1,
0x9_2_3_f_8_2_a_4,
0xa_b_1_c_5_e_d_5,
0xd_8_0_7_a_a_9_8,
0x1_2_8_3_5_b_0_1,
0x2_4_3_1_8_5_b_e,
0x5_5_0_c_7_d_c_3,
0x7_2_b_e_5_d_7_4,
0x8_0_d_e_b_1_f_e,
0x9_b_d_c_0_6_a_7,
0xc_1_9_b_f_1_7_4,
0xe_4_9_b_6_9_c_1,
0xe_f_b_e_4_7_8_6,
0x0_f_c_1_9_d_c_6,
0x2_4_0_c_a_1_c_c,
0x2_d_e_9_2_c_6_f,
0x4_a_7_4_8_4_a_a,
0x5_c_b_0_a_9_d_c,
0x7_6_f_9_8_8_d_a,
0x9_8_3_e_5_1_5_2,
0xa_8_3_1_c_6_6_d,
0xb_0_0_3_2_7_c_8,
0xb_f_5_9_7_f_c_7,
0xc_6_e_0_0_b_f_3,
0xd_5_a_7_9_1_4_7,
0x0_6_c_a_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_b_7_0_a_8_5,
0x2_e_1_b_2_1_3_8,
0x4_d_2_c_6_d_f_c,
0x5_3_3_8_0_d_1_3,
0x6_5_0_a_7_3_5_4,
0x7_6_6_a_0_a_b_b,
0x8_1_c_2_c_9_2_e,
0x9_2_7_2_2_c_8_5,
0xa_2_b_f_e_8_a_1,
0xa_8_1_a_6_6_4_b,
0xc_2_4_b_8_b_7_0,
0xc_7_6_c_5_1_a_3,
0xd_1_9_2_e_8_1_9,
0xd_6_9_9_0_6_2_4,
0xf_4_0_e_3_5_8_5,
0x1_0_6_a_a_0_7_0,
0x1_9_a_4_c_1_1_6,
0x1_e_3_7_6_c_0_8,
0x2_7_4_8_7_7_4_c,
0x3_4_b_0_b_c_b_5,
0x3_9_1_c_0_c_b_3,
0x4_e_d_8_a_a_4_a,
0x5_b_9_c_c_a_4_f,
0x6_8_2_e_6_f_f_3,
0x7_4_8_f_8_2_e_e,
0x7_8_a_5_6_3_6_f,
0x8_4_c_8_7_8_1_4,
0x8_c_c_7_0_2_0_8,
0x9_0_b_e_f_f_f_a,
0xa_4_5_0_6_c_e_b,
0xb_e_f_9_a_3_f_7,
0xc_6_7_1_7_8_f_2,
]
_a : Optional[int] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowercase ( _a ) -> bytes:
_a : Any = b'''\x80''' + (b'''\x00''' * (6_3 - (len(_a ) + 8) % 6_4))
_a : int = struct.pack('''>Q''' , (len(_a ) * 8) )
return data + padding + big_endian_integer
def __lowercase ( self ) -> None:
# Convert into blocks of 64 bytes
_a : Union[str, Any] = [
self.preprocessed_data[x : x + 6_4]
for x in range(0 , len(self.preprocessed_data ) , 6_4 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_a : Optional[Any] = list(struct.unpack('''>16L''' , _a ) )
# add 48 0-ed integers
words += [0] * 4_8
_a , _a , _a , _a , _a , _a , _a , _a : Any = self.hashes
for index in range(0 , 6_4 ):
if index > 1_5:
# modify the zero-ed indexes at the end of the array
_a : Optional[int] = (
self.ror(words[index - 1_5] , 7 )
^ self.ror(words[index - 1_5] , 1_8 )
^ (words[index - 1_5] >> 3)
)
_a : Optional[Any] = (
self.ror(words[index - 2] , 1_7 )
^ self.ror(words[index - 2] , 1_9 )
^ (words[index - 2] >> 1_0)
)
_a : Any = (
words[index - 1_6] + sa + words[index - 7] + sa
) % 0x1_0_0_0_0_0_0_0_0
# Compression
_a : Any = self.ror(_a , 6 ) ^ self.ror(_a , 1_1 ) ^ self.ror(_a , 2_5 )
_a : Tuple = (e & f) ^ ((~e & 0xf_f_f_f_f_f_f_f) & g)
_a : str = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0_0_0_0_0_0_0_0
_a : str = self.ror(_a , 2 ) ^ self.ror(_a , 1_3 ) ^ self.ror(_a , 2_2 )
_a : Optional[int] = (a & b) ^ (a & c) ^ (b & c)
_a : int = (sa + maj) % 0x1_0_0_0_0_0_0_0_0
_a , _a , _a , _a , _a , _a , _a , _a : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0),
)
_a : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_a : Optional[int] = [
((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
_a : Optional[Any] = ''''''.join([hex(_a )[2:].zfill(8 ) for value in self.hashes] )
def __lowercase ( self , _a , _a ) -> int:
return 0xf_f_f_f_f_f_f_f & (value << (3_2 - rotations)) | (value >> rotations)
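# Minimal mirror of the 32-bit right-rotation above (illustrative): the low
# bit of the word wraps around to the top when rotated right by one, so the
# helper below returns True.
def _ror32_example() -> bool:
    value, rotations = 0x00000001, 1
    rotated = 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
    return rotated == 0x80000000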
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> None:
import hashlib
_a : List[str] = bytes('''Test String''' , '''utf-8''' )
self.assertEqual(SHAaaa(_a ).hash , hashlib.shaaaa(_a ).hexdigest() )
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
_a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''-s''' ,'''--string''' ,dest='''input_string''' ,default='''Hello World!! Welcome to Cryptography''' ,help='''Hash the string''' ,)
parser.add_argument(
'''-f''' ,'''--file''' ,dest='''input_file''' ,help='''Hash contents of a file''' )
_a : Optional[Any] = parser.parse_args()
_a : Dict = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file ,'''rb''' ) as f:
_a : Dict = f.read()
else:
_a : Optional[Any] = bytes(__a ,'''utf-8''' )
print(SHAaaa(__a ).hash )
if __name__ == "__main__":
main()
| 14 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = KandinskyInpaintPipeline
UpperCAmelCase__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase__ : Optional[int] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Any = False
@property
def __lowercase ( self ) -> Optional[int]:
return 3_2
@property
def __lowercase ( self ) -> int:
return 3_2
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def __lowercase ( self ) -> Optional[Any]:
return 1_0_0
@property
def __lowercase ( self ) -> Optional[Any]:
_a : Any = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a : Optional[int] = MultilingualCLIP(_a )
_a : Tuple = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[str] = {
'''in_channels''': 9,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_a : Dict = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self ) -> Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ) -> Any:
_a : List[Any] = self.dummy_text_encoder
_a : Optional[Any] = self.dummy_tokenizer
_a : Optional[Any] = self.dummy_unet
_a : Union[str, Any] = self.dummy_movq
_a : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
_a : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowercase ( self , _a , _a=0 ) -> int:
_a : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_a : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_a : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_a ) ).to(_a )
_a : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a : Optional[int] = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
_a : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.floataa )
_a : List[str] = 0
if str(_a ).startswith('''mps''' ):
_a : Tuple = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
_a : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[Any] = '''cpu'''
_a : List[Any] = self.get_dummy_components()
_a : Tuple = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Any = pipe(**self.get_dummy_inputs(_a ) )
_a : str = output.images
_a : Tuple = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
_a : str = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowercase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
_a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_a : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
_a : Any = 0
_a : Optional[Any] = '''a hat'''
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_a : Tuple = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
_a : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_a , _a : Dict = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_a : Optional[int] = pipeline(
_a , image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
_a : Optional[int] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a , _a )
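# Note on the two-stage flow exercised above: the prior pipeline maps the text
# prompt to CLIP image embeddings, and the inpaint pipeline then conditions on
# those embeddings together with the source image and mask.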
| 14 | 1 |
from math import pow
def __UpperCAmelCase ( __a : int ,__a : int ,__a : int ,__a : int ,__a : int ,) -> tuple[int, int]:
"""simple docstring"""
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
_a : List[str] = int(pow(__a ,__a ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
_a , _a : Optional[Any] = backtrack(
__a ,__a ,current_number + 1 ,__a ,__a )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If i**power is still below needed_sum, also explore solutions that skip i and start from the next number.
_a , _a : List[str] = backtrack(
__a ,__a ,current_number + 1 ,__a ,__a )
return current_sum, solutions_count
def __UpperCAmelCase ( __a : int ,__a : int ) -> int:
"""simple docstring"""
if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
raise ValueError(
'''Invalid input\n'''
'''needed_sum must be between 1 and 1000, power between 2 and 10.''' )
return backtrack(__a ,__a ,1 ,0 ,0 )[1] # Return the solutions_count
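# Worked example (illustrative, using the intended name solve): solve(13, 2)
# returns 1, since 2**2 + 3**2 is the only way to write 13 as a sum of
# squares of distinct natural numbers.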
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
a__ = parser.parse_args()
a__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
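# Example invocation (hypothetical script and file names; only
# --checkpoint_path, --original_config_file and --dump_path are required):
#
#   python convert_controlnet.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors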
| 14 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
_a : int = 0
def __lowercase ( self ) -> List[str]:
_a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : Optional[Any] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
_a : Any = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Dict = CLIPConfig()
# Create a dummy config file with image_processor_type
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : List[str] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_a : Tuple = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_a : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Any:
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
_a : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowercase ( self ) -> List[Any]:
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : List[str] = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def __lowercase ( self ) -> Dict:
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_a : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowercase ( self ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_a : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
_a : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowercase ( self ) -> Dict:
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = Path(_a ) / '''preprocessor_config.json'''
_a : int = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : int = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowercase ( self ) -> Union[str, Any]:
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_a : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_a : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 14 |
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a , _a ) -> List[str]:
_a : List[Any] = name
_a : List[str] = value
_a : List[str] = weight
def __repr__( self ) -> Optional[int]:
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def __lowercase ( self ) -> List[Any]:
return self.value
def __lowercase ( self ) -> int:
return self.name
def __lowercase ( self ) -> Optional[int]:
return self.weight
def __lowercase ( self ) -> Optional[Any]:
return self.value / self.weight
def __UpperCAmelCase ( __a : Optional[int] ,__a : Tuple ,__a : List[str] ) -> List[str]:
"""simple docstring"""
_a : Optional[int] = []
for i in range(len(__a ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __UpperCAmelCase ( __a : int ,__a : Union[str, Any] ,__a : int ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = sorted(__a ,key=__a ,reverse=__a )
_a : Any = []
_a , _a : Optional[int] = 0.0, 0.0
for i in range(len(__a ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
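# Usage sketch (comment-only; the two functions above carry placeholder names,
# in the original module they are `build_menu` and `greedy`):
#
#     >>> menu = build_menu(["Burger", "Pizza"], [80, 100], [40, 60])
#     >>> chosen, total_value = greedy(menu, 60, Things.get_value)
#     >>> total_value   # only Pizza fits within the max cost of 60
#     100.0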
| 14 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = XLMProphetNetTokenizer
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[Any] = True
def __lowercase ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] = XLMProphetNetTokenizer(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Tuple = '''[PAD]'''
_a : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __lowercase ( self ) -> str:
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_a ) , 1_0_1_2 )
def __lowercase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def __lowercase ( self ) -> str:
_a : Tuple = XLMProphetNetTokenizer(_a , keep_accents=_a )
_a : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_a : List[Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
_a : List[str] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def __lowercase ( self ) -> List[str]:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def __lowercase ( self ) -> Tuple:
_a : str = '''Hello World!'''
_a : Tuple = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __lowercase ( self ) -> str:
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 14 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3 , _a=True , _a=True , _a=0.1 , _a=0.1 , _a=2_2_4 , _a=1_0_0_0 , _a=[3, 3, 6, 4] , _a=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Tuple:
_a : Dict = parent
_a : Optional[int] = batch_size
_a : Optional[Any] = num_channels
_a : Union[str, Any] = is_training
_a : Tuple = use_labels
_a : Dict = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Dict = num_labels
_a : List[str] = image_size
_a : Dict = layer_depths
_a : str = embed_dims
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
if self.use_labels:
_a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : Dict = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> int:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_a , layer_scale_init_value=1e-5 , )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : List[Any] = SwiftFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __lowercase ( self , _a , _a , _a ) -> Optional[Any]:
_a : List[str] = self.num_labels
_a : Optional[int] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_a : Union[str, Any] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Tuple:
((_a) , (_a) , (_a)) : Optional[int] = self.prepare_config_and_inputs()
_a : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[int] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = SwiftFormerModelTester(self )
_a : int = ConfigTester(
self , config_class=_a , has_text_modality=_a , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def __lowercase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
def __lowercase ( self ) -> Dict:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __lowercase ( self ) -> str:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> int:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = SwiftFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> int:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[Any] = outputs.hidden_states
_a : Union[str, Any] = 8
self.assertEqual(len(_a ) , _a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> str:
def _config_zero_init(_a ):
_a : List[Any] = copy.deepcopy(_a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_a , _a , 1e-1_0 )
if isinstance(getattr(_a , _a , _a ) , _a ):
_a : int = _config_zero_init(getattr(_a , _a ) )
setattr(_a , _a , _a )
return configs_no_init
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Dict:
_a : Any = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_a )
_a : Any = self.default_image_processor
_a : Any = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[Any] = model(**_a )
# verify the logits
_a : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : int = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 14 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a ) -> Tuple:
_a : Optional[int] = str(id_ )
_a : List[Any] = None
_a : List[Any] = None
_a : Tuple = []
_a : int = {} # {vertex:distance}
def __lt__( self , _a ) -> int:
return self.key < other.key
def __repr__( self ) -> Optional[int]:
return self.id
def __lowercase ( self , _a ) -> str:
self.neighbors.append(_a )
def __lowercase ( self , _a , _a ) -> Optional[int]:
_a : List[str] = weight
def __UpperCAmelCase ( __a : Optional[Any] ,__a : Union[str, Any] ,__a : int ,__a : int ) -> Any:
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] ,__a )
graph[b - 1].add_edge(graph[a - 1] ,__a )
def __UpperCAmelCase ( __a : list ,__a : Vertex ) -> list:
"""simple docstring"""
_a : Union[str, Any] = []
for u in graph:
_a : int = math.inf
_a : int = None
_a : str = 0
_a : List[Any] = graph[:]
while q:
_a : Any = min(__a )
q.remove(__a )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_a : str = u
_a : Dict = u.edges[v.id]
for i in range(1 ,len(__a ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __UpperCAmelCase ( __a : list ,__a : Vertex ) -> Iterator[tuple]:
"""simple docstring"""
for u in graph:
_a : int = math.inf
_a : Dict = None
_a : Optional[int] = 0
_a : Union[str, Any] = list(__a )
hq.heapify(__a )
while h:
_a : str = hq.heappop(__a )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_a : Optional[Any] = u
_a : str = u.edges[v.id]
hq.heapify(__a )
for i in range(1 ,len(__a ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
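# Usage sketch (comment-only; the class and functions above are shown with
# placeholder names, in the original module they are `Vertex`, `connect`,
# `prim` and `prim_heap`):
#
#     >>> graph = [Vertex(i) for i in range(3)]
#     >>> connect(graph, 1, 2, 1)        # endpoints are 1-based indices
#     >>> connect(graph, 2, 3, 1)
#     >>> prim(graph, graph[0])          # MST as (vertex, parent) pairs
#     [(2, 1), (3, 2)]
#     >>> list(prim_heap(graph, graph[0]))   # heap-based variant (generator)
#     [(2, 1), (3, 2)]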
| 14 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : str ) -> List[Any]:
"""simple docstring"""
_a : Tuple = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
_a : Dict = MaskFormerConfig(backbone_config=__a )
_a : Optional[Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
_a : Optional[Any] = 847
_a : List[Any] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
_a : Union[str, Any] = 150
_a : Any = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
_a : int = 171
_a : List[str] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
_a : Dict = 133
_a : Optional[Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
_a : List[Any] = 19
_a : Optional[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
_a : List[Any] = 65
_a : Dict = '''mapillary-vistas-id2label.json'''
_a : Optional[int] = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
_a : Tuple = {int(k ): v for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : str = dct.pop(__a )
_a : str = val
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_a : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_a : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[int] = in_proj_weight[:dim, :]
_a : List[Any] = in_proj_bias[: dim]
_a : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
_a : Tuple = in_proj_bias[
dim : dim * 2
]
_a : int = in_proj_weight[
-dim :, :
]
_a : Optional[int] = in_proj_bias[-dim :]
# fmt: on
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Union[str, Any] = in_proj_weight[: hidden_size, :]
_a : List[Any] = in_proj_bias[:config.hidden_size]
_a : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Any = in_proj_bias[hidden_size : hidden_size * 2]
_a : Tuple = in_proj_weight[-hidden_size :, :]
_a : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_a : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[Any] = in_proj_weight[: hidden_size, :]
_a : Any = in_proj_bias[:config.hidden_size]
_a : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_a : List[str] = in_proj_weight[-hidden_size :, :]
_a : int = in_proj_bias[-hidden_size :]
# fmt: on
def __UpperCAmelCase ( ) -> torch.Tensor:
"""simple docstring"""
_a : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : Dict = Image.open(requests.get(__a ,stream=__a ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __a : str ,__a : str ,__a : str ,__a : bool = False ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[Any] = get_maskformer_config(__a )
# load original state_dict
with open(__a ,'''rb''' ) as f:
_a : str = pickle.load(__a )
_a : Union[str, Any] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_a : Any = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a ,__a ,__a )
read_in_swin_q_k_v(__a ,config.backbone_config )
read_in_decoder_q_k_v(__a ,__a )
# update to torch tensors
for key, value in state_dict.items():
_a : Optional[int] = torch.from_numpy(__a )
# load 🤗 model
_a : Dict = MaskFormerForInstanceSegmentation(__a )
model.eval()
for name, param in model.named_parameters():
print(__a ,param.shape )
_a , _a : Tuple = model.load_state_dict(__a ,strict=__a )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__a ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_a : Union[str, Any] = prepare_img()
if "vistas" in model_name:
_a : int = 65
elif "cityscapes" in model_name:
_a : Tuple = 65_535
else:
_a : str = 255
_a : Dict = True if '''ade''' in model_name else False
_a : Optional[Any] = MaskFormerImageProcessor(ignore_index=__a ,reduce_labels=__a )
_a : Optional[Any] = image_processor(__a ,return_tensors='''pt''' )
_a : int = model(**__a )
print('''Logits:''' ,outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_a : Union[str, Any] = torch.tensor(
[[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__a ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
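# Example invocation (hypothetical script name and local paths; the argument
# defaults above already target the Swin-tiny ADE20k checkpoint):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path ./MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade \
#       --push_to_hub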
| 14 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[int] = 1_0
def __lowercase ( self ) -> List[Any]:
_a : Optional[Any] = [1, 2, 3, 4]
_a : Optional[int] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_a , self.block_size , 0 ) , _a )
def __lowercase ( self ) -> str:
_a : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
_a : Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(_a , self.block_size , 0 ) , _a )
def __lowercase ( self ) -> List[str]:
_a : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
_a : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(_a , self.block_size , 0 ) , _a )
def __lowercase ( self ) -> List[str]:
_a : Dict = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
_a , _a : Optional[int] = process_story(_a )
self.assertEqual(_a , [] )
def __lowercase ( self ) -> str:
_a : Dict = ''''''
_a , _a : str = process_story(_a )
self.assertEqual(_a , [] )
self.assertEqual(_a , [] )
def __lowercase ( self ) -> Optional[Any]:
_a : int = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
_a , _a : Optional[int] = process_story(_a )
_a : Any = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_a , _a )
_a : List[str] = ['''It was the best of times.''']
self.assertEqual(_a , _a )
def __lowercase ( self ) -> str:
_a : List[Any] = torch.tensor([1, 2, 3, 4] )
_a : Optional[int] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_a , 0 ).numpy() , expected.numpy() )
def __lowercase ( self ) -> int:
_a : int = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
_a : List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_a , 2_3 ).numpy() , expected.numpy() )
def __lowercase ( self ) -> int:
_a : Dict = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_a : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_a , 1 ).numpy() , expected.numpy() )
def __lowercase ( self ) -> str:
_a : List[Any] = 1_0_1
_a : Optional[int] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
_a : Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_a : Dict = compute_token_type_ids(_a , _a )
np.testing.assert_array_equal(_a , _a )
| 14 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = XLMProphetNetTokenizer
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[Any] = True
def __lowercase ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] = XLMProphetNetTokenizer(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Tuple = '''[PAD]'''
_a : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __lowercase ( self ) -> str:
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_a ) , 1_0_1_2 )
def __lowercase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def __lowercase ( self ) -> str:
_a : Tuple = XLMProphetNetTokenizer(_a , keep_accents=_a )
_a : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_a : List[Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
_a : List[str] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def __lowercase ( self ) -> List[str]:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def __lowercase ( self ) -> Tuple:
_a : str = '''Hello World!'''
_a : Tuple = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __lowercase ( self ) -> str:
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 14 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_2 , _a=7 , _a=True , _a=True , _a=True , _a=9_9 , _a=3_2 , _a=3_2 , _a=2 , _a=4 , _a=3_7 , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=0.02 , _a=0 , _a=None , ) -> Optional[Any]:
_a : Optional[Any] = parent
_a : int = batch_size
_a : Optional[int] = seq_length
_a : List[Any] = is_training
_a : Dict = use_input_mask
_a : Optional[Any] = use_labels
_a : Tuple = vocab_size
_a : List[Any] = hidden_size
_a : str = projection_dim
_a : Optional[int] = num_hidden_layers
_a : Union[str, Any] = num_attention_heads
_a : Union[str, Any] = intermediate_size
_a : int = dropout
_a : List[Any] = attention_dropout
_a : Tuple = max_position_embeddings
_a : Dict = initializer_range
_a : Union[str, Any] = scope
_a : Tuple = bos_token_id
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Optional[int] = None
if self.use_input_mask:
_a : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_a : int = input_mask.numpy()
_a , _a : int = input_mask.shape
_a : List[Any] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_a ):
_a : int = 1
_a : Dict = 0
_a : str = self.get_config()
return config, input_ids, tf.convert_to_tensor(_a )
def __lowercase ( self ) -> List[str]:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def __lowercase ( self , _a , _a , _a ) -> Dict:
_a : int = TFBlipTextModel(config=_a )
_a : str = model(_a , attention_mask=_a , training=_a )
_a : int = model(_a , training=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowercase ( self ) -> str:
_a : Optional[Any] = self.prepare_config_and_inputs()
_a , _a , _a : Union[str, Any] = config_and_inputs
_a : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase__ : str = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Dict = False
def __lowercase ( self ) -> Any:
_a : int = BlipTextModelTester(self )
_a : Optional[Any] = ConfigTester(self , config_class=_a , hidden_size=3_7 )
def __lowercase ( self ) -> Any:
self.config_tester.run_common_tests()
def __lowercase ( self ) -> Union[str, Any]:
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[Any]:
pass
def __lowercase ( self ) -> int:
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def __lowercase ( self ) -> Dict:
pass
@slow
def __lowercase ( self ) -> Any:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = TFBlipTextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self , _a=True ) -> List[str]:
super().test_pt_tf_model_equivalence(allow_missing_keys=_a )
| 14 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = LxmertTokenizer
UpperCAmelCase__ : Optional[Any] = LxmertTokenizerFast
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Dict = True
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
_a : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowercase ( self , _a ) -> List[str]:
_a : Tuple = '''UNwant\u00E9d,running'''
_a : str = '''unwanted, running'''
return input_text, output_text
def __lowercase ( self ) -> List[Any]:
_a : str = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 1_0, 8, 9] )
def __lowercase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
_a : Optional[Any] = self.get_tokenizer()
_a : str = self.get_rust_tokenizer()
_a : Optional[Any] = '''I was born in 92000, and this is falsé.'''
_a : Optional[Any] = tokenizer.tokenize(_a )
_a : List[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
_a : Any = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Dict = self.get_rust_tokenizer()
_a : Optional[int] = tokenizer.encode(_a )
_a : Dict = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
| 14 | 1 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=__lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = ["flax", "transformers"]
def __init__( self , *_a , **_a ) -> Dict:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __lowercase ( cls , *_a , **_a ) -> Optional[int]:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __lowercase ( cls , *_a , **_a ) -> Union[str, Any]:
requires_backends(cls , ['''flax''', '''transformers'''] )
class UpperCAmelCase_ ( metaclass=__lowercase ):
"""simple docstring"""
UpperCAmelCase__ : str = ["flax", "transformers"]
def __init__( self , *_a , **_a ) -> Union[str, Any]:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __lowercase ( cls , *_a , **_a ) -> List[str]:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __lowercase ( cls , *_a , **_a ) -> Tuple:
requires_backends(cls , ['''flax''', '''transformers'''] )
class UpperCAmelCase_ ( metaclass=__lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ["flax", "transformers"]
def __init__( self , *_a , **_a ) -> str:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __lowercase ( cls , *_a , **_a ) -> Optional[int]:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __lowercase ( cls , *_a , **_a ) -> Dict:
requires_backends(cls , ['''flax''', '''transformers'''] )
class UpperCAmelCase_ ( metaclass=__lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = ["flax", "transformers"]
def __init__( self , *_a , **_a ) -> str:
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def __lowercase ( cls , *_a , **_a ) -> Tuple:
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def __lowercase ( cls , *_a , **_a ) -> Optional[Any]:
requires_backends(cls , ['''flax''', '''transformers'''] )
| 14 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> int:
_a : Dict = '''ZinengTang/tvlt-base'''
_a : List[str] = tempfile.mkdtemp()
def __lowercase ( self , **_a ) -> int:
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self , **_a ) -> List[Any]:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Dict:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_feature_extractor()
_a : Optional[int] = TvltProcessor(image_processor=_a , feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Any = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _a )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Optional[Any] = self.get_image_processor()
_a : Dict = self.get_feature_extractor()
_a : Dict = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : Union[str, Any] = np.ones([1_2_0_0_0] )
_a : Dict = feature_extractor(_a , return_tensors='''np''' )
_a : Tuple = processor(audio=_a , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> int:
_a : Optional[Any] = self.get_image_processor()
_a : Union[str, Any] = self.get_feature_extractor()
_a : Optional[Any] = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : List[Any] = np.ones([3, 2_2_4, 2_2_4] )
_a : int = image_processor(_a , return_tensors='''np''' )
_a : Optional[int] = processor(images=_a , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> Union[str, Any]:
_a : int = self.get_image_processor()
_a : Union[str, Any] = self.get_feature_extractor()
_a : Any = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : List[str] = np.ones([1_2_0_0_0] )
_a : Optional[int] = np.ones([3, 2_2_4, 2_2_4] )
_a : int = processor(audio=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __lowercase ( self ) -> Union[str, Any]:
_a : str = self.get_image_processor()
_a : Union[str, Any] = self.get_feature_extractor()
_a : Dict = TvltProcessor(image_processor=_a , feature_extractor=_a )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 14 | 1 |
from math import isclose, sqrt
def __UpperCAmelCase ( __a : float ,__a : float ,__a : float ) -> tuple[float, float, float]:
"""simple docstring"""
_a : Any = point_y / 4 / point_x
_a : Dict = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
_a : Optional[Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
_a : Optional[Any] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
_a : List[Any] = outgoing_gradient**2 + 4
_a : int = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
_a : Optional[int] = (point_y - outgoing_gradient * point_x) ** 2 - 100
_a : List[str] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
_a : Dict = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
_a : Optional[Any] = x_minus if isclose(__a ,__a ) else x_plus
_a : Optional[Any] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def __UpperCAmelCase ( __a : float = 1.4 ,__a : float = -9.6 ) -> int:
"""simple docstring"""
_a : int = 0
_a : float = first_x_coord
_a : float = first_y_coord
_a : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
_a , _a , _a : Dict = next_point(__a ,__a ,__a )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
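# Hedged illustration of the simultaneous-equation step in the reflection helper
# above: substituting y = m*x + b (with b = point_y - m*point_x) into
# 4x^2 + y^2 = 100 gives (m**2 + 4)*x**2 + 2*m*b*x + (b**2 - 100) = 0.
# The m, b values below are arbitrary assumptions for demonstration only.
if __name__ == "__main__":
    _m, _b = -2.0, 6.0
    _A, _B, _C = _m**2 + 4, 2 * _m * _b, _b**2 - 100
    _x = (-_B + sqrt(_B**2 - 4 * _A * _C)) / (2 * _A)
    _y = _m * _x + _b
    assert abs(4 * _x**2 + _y**2 - 100) < 1e-9  # the root lies on the ellipse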
| 14 |
def __UpperCAmelCase ( __a : str ) -> list:
"""simple docstring"""
    if __a == "":
return []
_a : list = []
for temp in range(int(__a ) ):
series.append(F"""1/{temp + 1}""" if series else '''1''' )
return series
if __name__ == "__main__":
a__ = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
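# Hedged, self-contained sketch of the same series construction (the name
# `build_harmonic` is an assumption for illustration, not the function above):
def build_harmonic(n: int) -> list:
    return ['''1''' if i == 0 else f"""1/{i + 1}""" for i in range(n)]

assert build_harmonic(4) == ['''1''', '''1/2''', '''1/3''', '''1/4''']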
| 14 | 1 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __UpperCAmelCase ( __a : Any ) -> str:
"""simple docstring"""
    _a : List[str] = __a.split(os.path.sep )[-1]
    return re.search(R'''^(.*)_\d+\.jpg$''' ,_a ).groups()[0]
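# Hedged example of the filename convention the regex above assumes
# (Oxford-IIIT Pet style, '''<label>_<index>.jpg'''; the sample name is illustrative):
assert re.search(R'''^(.*)_\d+\.jpg$''' , '''Abyssinian_12.jpg''' ).groups()[0] == '''Abyssinian'''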
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , _a , _a=None , _a=None ) -> Dict:
_a : Optional[int] = file_names
_a : List[str] = image_transform
_a : List[Any] = label_to_id
def __len__( self ) -> Any:
return len(self.file_names )
def __getitem__( self , _a ) -> Tuple:
_a : Union[str, Any] = self.file_names[idx]
_a : Tuple = PIL.Image.open(_a )
_a : Dict = raw_image.convert('''RGB''' )
if self.image_transform is not None:
_a : Tuple = self.image_transform(_a )
_a : Dict = extract_label(_a )
if self.label_to_id is not None:
_a : Any = self.label_to_id[label]
return {"image": image, "label": label}
def __UpperCAmelCase ( __a : str ,__a : int ) -> Union[str, Any]:
"""simple docstring"""
if args.with_tracking:
_a : Tuple = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,log_with='''all''' ,project_dir=args.project_dir )
else:
_a : int = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : List[Any] = config['''lr''']
_a : Optional[Any] = int(config['''num_epochs'''] )
_a : List[Any] = int(config['''seed'''] )
_a : Optional[Any] = int(config['''batch_size'''] )
_a : str = config['''image_size''']
if not isinstance(__a ,(list, tuple) ):
_a : Optional[int] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps ,'''isdigit''' ):
if args.checkpointing_steps == "epoch":
_a : List[str] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_a : int = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
_a : Optional[int] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_a : List[str] = os.path.split(__a )[-1].split('''.''' )[0]
accelerator.init_trackers(__a ,__a )
# Grab all the image filenames
_a : Dict = [os.path.join(args.data_dir ,__a ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
_a : Dict = [extract_label(__a ) for fname in file_names]
_a : str = list(set(__a ) )
id_to_label.sort()
_a : List[str] = {lbl: i for i, lbl in enumerate(__a )}
# Set the seed before splitting the data.
np.random.seed(__a )
torch.manual_seed(__a )
torch.cuda.manual_seed_all(__a )
# Split our filenames between train and validation
_a : str = np.random.permutation(len(__a ) )
_a : List[Any] = int(0.8 * len(__a ) )
_a : Dict = random_perm[:cut]
_a : Any = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_a : Optional[int] = Compose([RandomResizedCrop(__a ,scale=(0.5, 1.0) ), ToTensor()] )
_a : Any = PetsDataset(
[file_names[i] for i in train_split] ,image_transform=__a ,label_to_id=__a )
# For evaluation, we use a deterministic Resize
_a : Dict = Compose([Resize(__a ), ToTensor()] )
_a : Optional[Any] = PetsDataset([file_names[i] for i in eval_split] ,image_transform=__a ,label_to_id=__a )
# Instantiate dataloaders.
_a : Tuple = DataLoader(__a ,shuffle=__a ,batch_size=__a ,num_workers=4 )
_a : Optional[Any] = DataLoader(__a ,shuffle=__a ,batch_size=__a ,num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : str = create_model('''resnet50d''' ,pretrained=__a ,num_classes=len(__a ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a : int = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_a : Optional[int] = False
for param in model.get_classifier().parameters():
_a : Any = True
# We normalize the batches of images to be a bit faster.
_a : int = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
_a : Dict = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_a : Union[str, Any] = torch.optim.Adam(params=model.parameters() ,lr=lr / 25 )
# Instantiate learning rate scheduler
_a : Optional[int] = OneCycleLR(optimizer=__a ,max_lr=__a ,epochs=__a ,steps_per_epoch=len(__a ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a : Any = accelerator.prepare(
__a ,__a ,__a ,__a ,__a )
# We need to keep track of how many total steps we have iterated over
_a : Optional[int] = 0
# We also need to keep track of the starting epoch so files are named properly
_a : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
_a : Tuple = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_a : Optional[Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_a : List[Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_a : Optional[int] = os.path.splitext(__a )[0]
if "epoch" in training_difference:
_a : List[Any] = int(training_difference.replace('''epoch_''' ,'''''' ) ) + 1
_a : str = None
else:
_a : str = int(training_difference.replace('''step_''' ,'''''' ) )
_a : Any = resume_step // len(__a )
resume_step -= starting_epoch * len(__a )
# Now we train the model
for epoch in range(__a ,__a ):
model.train()
if args.with_tracking:
_a : List[Any] = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_a : List[Any] = accelerator.skip_first_batches(__a ,__a )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_a : str = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_a : Optional[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
_a : Dict = (batch['''image'''] - mean) / std
_a : Optional[int] = model(__a )
_a : Union[str, Any] = torch.nn.functional.cross_entropy(__a ,batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__a )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__a ,__a ):
_a : List[str] = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_a : Dict = os.path.join(args.output_dir ,__a )
accelerator.save_state(__a )
model.eval()
_a : Union[str, Any] = 0
_a : List[str] = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_a : str = {k: v.to(accelerator.device ) for k, v in batch.items()}
_a : Tuple = (batch['''image'''] - mean) / std
with torch.no_grad():
_a : int = model(__a )
_a : Tuple = outputs.argmax(dim=-1 )
_a , _a : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''label''']) )
_a : Union[str, Any] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_a : Any = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 100 * eval_metric,
'''train_loss''': total_loss.item() / len(__a ),
'''epoch''': epoch,
} ,step=__a ,)
if checkpointing_steps == "epoch":
_a : str = F"""epoch_{epoch}"""
if args.output_dir is not None:
_a : Optional[int] = os.path.join(args.output_dir ,__a )
accelerator.save_state(__a )
if args.with_tracking:
accelerator.end_training()
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : List[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' ,required=__a ,help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' ,action='''store_true''' ,help='''If passed, will use FP16 training.''' )
parser.add_argument(
    '''--mixed_precision''' ,type=__a ,default=__a ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose '''
    '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
    '''and an Nvidia Ampere GPU.''' ,)
parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' ,type=__a ,default=__a ,help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' ,)
parser.add_argument(
'''--output_dir''' ,type=__a ,default='''.''' ,help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' ,)
parser.add_argument(
'''--resume_from_checkpoint''' ,type=__a ,default=__a ,help='''If the training should continue from a checkpoint folder.''' ,)
parser.add_argument(
'''--with_tracking''' ,action='''store_true''' ,help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' ,)
parser.add_argument(
    '''--project_dir''' ,type=__a ,default='''logs''' ,help='''Where to store experiment tracking logs and relevant project information''' ,)
_a : str = parser.parse_args()
_a : Optional[Any] = {'''lr''': 3E-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 224}
training_function(__a ,__a )
if __name__ == "__main__":
main()
| 14 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[Any] ,__a : Optional[int] ) -> Dict:
"""simple docstring"""
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : int ,__a : List[str]="attention" ) -> List[str]:
"""simple docstring"""
_a : str = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
_a : Tuple = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_a : Any = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
_a : Dict = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_a : Union[str, Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
_a : Any = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_a : Tuple = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
_a : int = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
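# Hedged shape sketch of the reshape above (dimensions are illustrative
# assumptions): T5X stores attention kernels with separate head axes, and
# flattening (n_head, d_head) yields the (d_model, n_head * d_head) matrix
# expected by the PyTorch checkpoint.
def _demo_flatten_heads() -> None:
    k_tmp = np.zeros((5_1_2, 8, 6_4) )  # (d_model, n_head, d_head) for one layer
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    assert k.shape == (5_1_2, 5_1_2)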
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Union[str, Any] ,__a : List[Any] ,__a : Any=False ) -> Any:
"""simple docstring"""
if split_mlp_wi:
_a : Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
_a : Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
_a : List[str] = (wi_a, wi_a)
else:
_a : List[str] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
_a : Optional[int] = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[Any] ,__a : Union[str, Any] ,__a : str ) -> List[str]:
"""simple docstring"""
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __UpperCAmelCase ( __a : dict ,*, __a : int ,__a : bool ,__a : bool = False ) -> Any:
"""simple docstring"""
_a : Dict = traverse_util.flatten_dict(variables['''target'''] )
_a : Any = {'''/'''.join(__a ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_a : Optional[int] = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' ,__a )
_a : Tuple = collections.OrderedDict()
# Shared embeddings.
_a : Any = old['''token_embedder/embedding''']
# Encoder.
for i in range(__a ):
# Block i, layer 0 (Self Attention).
_a : Optional[Any] = tax_layer_norm_lookup(__a ,__a ,'''encoder''' ,'''pre_attention_layer_norm''' )
_a , _a , _a , _a : List[str] = tax_attention_lookup(__a ,__a ,'''encoder''' ,'''attention''' )
_a : List[str] = layer_norm
_a : Optional[Any] = k.T
_a : str = o.T
_a : List[Any] = q.T
_a : Tuple = v.T
# Block i, layer 1 (MLP).
_a : str = tax_layer_norm_lookup(__a ,__a ,'''encoder''' ,'''pre_mlp_layer_norm''' )
_a , _a : Any = tax_mlp_lookup(__a ,__a ,'''encoder''' ,__a )
_a : str = layer_norm
if split_mlp_wi:
_a : List[Any] = wi[0].T
_a : Any = wi[1].T
else:
_a : Any = wi.T
_a : Optional[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_a : Dict = tax_relpos_bias_lookup(
__a ,__a ,'''encoder''' ).T
_a : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
_a : List[Any] = tax_relpos_bias_lookup(
__a ,0 ,'''encoder''' ).T
_a : Optional[Any] = tax_relpos_bias_lookup(
__a ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(__a ):
# Block i, layer 0 (Self Attention).
_a : Union[str, Any] = tax_layer_norm_lookup(__a ,__a ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
_a , _a , _a , _a : Optional[Any] = tax_attention_lookup(__a ,__a ,'''decoder''' ,'''self_attention''' )
_a : Optional[Any] = layer_norm
_a : Dict = k.T
_a : str = o.T
_a : str = q.T
_a : List[str] = v.T
# Block i, layer 1 (Cross Attention).
_a : Any = tax_layer_norm_lookup(__a ,__a ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
_a , _a , _a , _a : str = tax_attention_lookup(__a ,__a ,'''decoder''' ,'''encoder_decoder_attention''' )
_a : Optional[Any] = layer_norm
_a : Optional[int] = k.T
_a : Dict = o.T
_a : str = q.T
_a : int = v.T
# Block i, layer 2 (MLP).
_a : Optional[int] = tax_layer_norm_lookup(__a ,__a ,'''decoder''' ,'''pre_mlp_layer_norm''' )
_a , _a : Tuple = tax_mlp_lookup(__a ,__a ,'''decoder''' ,__a )
_a : Optional[Any] = layer_norm
if split_mlp_wi:
_a : List[str] = wi[0].T
_a : List[Any] = wi[1].T
else:
_a : Dict = wi.T
_a : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_a : Tuple = tax_relpos_bias_lookup(__a ,__a ,'''decoder''' ).T
_a : Tuple = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_a : Any = old['''decoder/logits_dense/kernel'''].T
return new
def __UpperCAmelCase ( __a : Dict ,__a : bool ) -> Tuple:
"""simple docstring"""
_a : Tuple = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_a : Any = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_a : Optional[int] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_a : str = state_dict['''shared.weight''']
return state_dict
def __UpperCAmelCase ( __a : List[str] ,__a : Union[str, Any] ,__a : Dict ,__a : Union[str, Any] ,__a : List[Any] ) -> int:
"""simple docstring"""
_a : List[str] = checkpoints.load_tax_checkpoint(__a )
_a : str = convert_tax_to_pytorch(
__a ,num_layers=config.num_layers ,is_encoder_only=__a ,scalable_attention=__a )
_a : str = make_state_dict(__a ,__a )
model.load_state_dict(__a ,strict=__a )
def __UpperCAmelCase ( __a : List[Any] ,__a : Any ,__a : Union[str, Any] ,__a : bool = False ,__a : bool = False ,) -> Optional[Any]:
"""simple docstring"""
_a : List[str] = MTaConfig.from_json_file(__a )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_a : Any = UMTaEncoderModel(__a )
else:
_a : Tuple = UMTaForConditionalGeneration(__a )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__a ,__a ,__a ,__a ,__a )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__a )
# Verify that we can load the checkpoint.
model.from_pretrained(__a )
print('''Done''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
a__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 14 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 14 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
a__ = '''Usage of script: script_name <size_of_canvas:int>'''
a__ = [0] * 100 + [1] * 10
random.shuffle(choice)
def __UpperCAmelCase ( __a : int ) -> list[list[bool]]:
"""simple docstring"""
_a : int = [[False for i in range(__a )] for j in range(__a )]
return canvas
def __UpperCAmelCase ( __a : list[list[bool]] ) -> None:
"""simple docstring"""
for i, row in enumerate(__a ):
for j, _ in enumerate(__a ):
_a : Optional[int] = bool(random.getrandbits(1 ) )
def __UpperCAmelCase ( __a : list[list[bool]] ) -> list[list[bool]]:
"""simple docstring"""
_a : Any = np.array(__a )
_a : Optional[int] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__a ):
for c, pt in enumerate(__a ):
_a : Tuple = __judge_point(
__a ,current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
_a : List[str] = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
_a : list[list[bool]] = current_canvas.tolist()
return return_canvas
def __UpperCAmelCase ( __a : bool ,__a : list[list[bool]] ) -> bool:
"""simple docstring"""
_a : Optional[Any] = 0
_a : str = 0
    # count the dead and alive neighbours.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
_a : Optional[int] = pt
if pt:
if alive < 2:
_a : Dict = False
elif alive == 2 or alive == 3:
_a : Optional[Any] = True
elif alive > 3:
_a : str = False
else:
if alive == 3:
_a : int = True
return state
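# Hedged aside: the same rules can be applied to the whole board at once with
# NumPy. This is a sketch assuming zero-padded (non-wrapping) edges, which
# differs slightly at the borders from the raw slicing in the step function above:
def _life_step(grid: np.ndarray) -> np.ndarray:
    padded = np.pad(grid.astype(int) , 1 )
    neighbours = sum(
        padded[1 + dr : padded.shape[0] - 1 + dr, 1 + dc : padded.shape[1] - 1 + dc]
        for dr in (-1, 0, 1)
        for dc in (-1, 0, 1)
        if (dr, dc) != (0, 0)
    )
    return (neighbours == 3) | (grid.astype(bool) & (neighbours == 2))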
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
a__ = int(sys.argv[1])
# main working structure of this module.
a__ = create_canvas(canvas_size)
seed(c)
a__ , a__ = plt.subplots()
fig.show()
a__ = ListedColormap(['''w''', '''k'''])
try:
while True:
a__ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 14 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __UpperCAmelCase ( __a : Optional[Any] ,__a : int ,__a : Any ) -> int:
"""simple docstring"""
_a : int = 0
if start < end:
_a : Tuple = randint(__a ,__a )
_a : Tuple = a[end]
_a : List[str] = a[pivot]
_a : Any = temp
_a , _a : Optional[int] = _in_place_partition(__a ,__a ,__a )
count += _in_place_quick_sort(__a ,__a ,p - 1 )
count += _in_place_quick_sort(__a ,p + 1 ,__a )
return count
def __UpperCAmelCase ( __a : List[Any] ,__a : Tuple ,__a : Dict ) -> Dict:
"""simple docstring"""
_a : Dict = 0
_a : Tuple = randint(__a ,__a )
_a : List[Any] = a[end]
_a : str = a[pivot]
_a : str = temp
_a : Dict = start - 1
for index in range(__a ,__a ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_a : int = new_pivot_index + 1
_a : Any = a[new_pivot_index]
_a : Optional[int] = a[index]
_a : str = temp
_a : Union[str, Any] = a[new_pivot_index + 1]
_a : Tuple = a[end]
_a : Any = temp
return new_pivot_index + 1, count
a__ = TemporaryFile()
a__ = 100 # 1000 elements are to be sorted
a__ , a__ = 0, 1 # mean and standard deviation
a__ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a__ = np.load(outfile)
a__ = len(M) - 1
a__ = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
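# Hedged aside: the count returned above is the number of element comparisons
# made while partitioning; with a random pivot it is O(n log n) in expectation,
# about 2 * n * ln(n):
import math

print('''expected comparisons for n = 100: ~''', round(2 * 100 * math.log(100)))  # ≈ 921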
| 14 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "funnel"
UpperCAmelCase__ : Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self , _a=3_0_5_2_2 , _a=[4, 4, 4] , _a=None , _a=2 , _a=7_6_8 , _a=1_2 , _a=6_4 , _a=3_0_7_2 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1e-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ) -> List[Any]:
_a : Optional[int] = vocab_size
_a : Dict = block_sizes
_a : Optional[int] = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_a : int = num_decoder_layers
_a : List[str] = d_model
_a : Optional[Any] = n_head
_a : Tuple = d_head
_a : Dict = d_inner
_a : List[str] = hidden_act
_a : int = hidden_dropout
_a : Union[str, Any] = attention_dropout
_a : Tuple = activation_dropout
_a : Optional[Any] = initializer_range
_a : Dict = initializer_std
_a : Union[str, Any] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
_a : Any = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
_a : Optional[Any] = attention_type
_a : int = separate_cls
_a : Tuple = truncate_seq
_a : List[Any] = pool_q_only
super().__init__(**_a )
@property
def __lowercase ( self ) -> Tuple:
return sum(self.block_sizes )
@num_hidden_layers.setter
def __lowercase ( self , _a ) -> List[str]:
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def __lowercase ( self ) -> Optional[int]:
return len(self.block_sizes )
@num_blocks.setter
def __lowercase ( self , _a ) -> Dict:
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 14 | 1 |
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a , _a ) -> List[str]:
_a : List[Any] = name
_a : List[str] = value
_a : List[str] = weight
def __repr__( self ) -> Optional[int]:
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def __lowercase ( self ) -> List[Any]:
return self.value
def __lowercase ( self ) -> int:
return self.name
def __lowercase ( self ) -> Optional[int]:
return self.weight
def __lowercase ( self ) -> Optional[Any]:
return self.value / self.weight
def __UpperCAmelCase ( __a : Optional[int] ,__a : Tuple ,__a : List[str] ) -> List[str]:
"""simple docstring"""
_a : Optional[int] = []
for i in range(len(__a ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __UpperCAmelCase ( __a : int ,__a : Union[str, Any] ,__a : int ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = sorted(__a ,key=__a ,reverse=__a )
_a : Any = []
_a , _a : Optional[int] = 0.0, 0.0
for i in range(len(__a ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
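# Hedged, self-contained sketch of the same greedy-by-density idea, independent
# of the renamed helpers above; the (value, weight) items are illustrative:
def _greedy_by_ratio(items: list , max_weight: float ) -> tuple:
    chosen, total_w, total_v = [], 0.0, 0.0
    for value, weight in sorted(items , key=lambda it: it[0] / it[1] , reverse=True ):
        if total_w + weight <= max_weight:
            chosen.append((value, weight) )
            total_w += weight
            total_v += value
    return chosen, total_v

assert _greedy_by_ratio([(80, 40), (100, 60), (60, 10)] , 60 )[1] == 140.0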
def __UpperCAmelCase ( ) -> int:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 14 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : int = "mobilenet_v1"
def __init__( self , _a=3 , _a=2_2_4 , _a=1.0 , _a=8 , _a="relu6" , _a=True , _a=0.999 , _a=0.02 , _a=0.001 , **_a , ) -> List[Any]:
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_a : Tuple = num_channels
_a : str = image_size
_a : Tuple = depth_multiplier
_a : Any = min_depth
_a : int = hidden_act
_a : Optional[Any] = tf_padding
_a : str = classifier_dropout_prob
_a : Optional[int] = initializer_range
_a : Any = layer_norm_eps
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : str = version.parse("1.11" )
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def __lowercase ( self ) -> float:
return 1e-4
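# Hedged, self-contained sketch of what the {0: '''batch'''} mapping above
# encodes: a dynamic batch axis for ONNX export. The tiny module and file name
# below are illustrative assumptions, not the real transformers export path.
import torch

class _TinyVisionModel(torch.nn.Module):
    def forward(self, pixel_values):
        return pixel_values.mean(dim=(1, 2, 3) )

if __name__ == "__main__":
    torch.onnx.export(
        _TinyVisionModel(),
        torch.randn(1, 3, 2_2_4, 2_2_4),
        '''tiny.onnx''',
        input_names=['''pixel_values'''],
        output_names=['''logits'''],
        dynamic_axes={'''pixel_values''': {0: '''batch'''}, '''logits''': {0: '''batch'''}},
    )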
| 14 | 1 |
from __future__ import annotations
def __UpperCAmelCase ( __a : list ) -> list:
"""simple docstring"""
if len(__a ) == 0:
return []
_a , _a : Tuple = min(__a ), max(__a )
_a : int = int(max_value - min_value ) + 1
_a : list[list] = [[] for _ in range(__a )]
for i in my_list:
        buckets[int(i - min_value )].append(i)
return [v for bucket in buckets for v in sorted(__a )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 14 |
a__ = '''Input must be a string of 8 numbers plus letter'''
a__ = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def __UpperCAmelCase ( __a : str ) -> bool:
"""simple docstring"""
if not isinstance(__a ,__a ):
_a : List[str] = F"""Expected string as input, found {type(__a ).__name__}"""
raise TypeError(__a )
_a : List[Any] = spanish_id.replace('''-''' ,'''''' ).upper()
if len(__a ) != 9:
raise ValueError(__a )
try:
_a : Any = int(spanish_id_clean[0:8] )
_a : str = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__a ) from ex
if letter.isdigit():
raise ValueError(__a )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
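# Hedged worked example of the mod-23 checksum (digits chosen for illustration):
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so "12345678-Z" is valid.
assert 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == '''Z'''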
| 14 | 1 |
import string
def __UpperCAmelCase ( __a : str ) -> None:
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
_a : int = ''''''
for symbol in message:
if symbol in string.ascii_uppercase:
_a : Union[str, Any] = string.ascii_uppercase.find(__a )
_a : Optional[int] = num - key
if num < 0:
_a : Any = num + len(string.ascii_uppercase )
_a : Optional[int] = translated + string.ascii_uppercase[num]
else:
_a : Optional[int] = translated + symbol
print(F"""Decryption using Key #{key}: {translated}""" )
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_a : List[Any] = input('''Encrypted message: ''' )
_a : Union[str, Any] = message.upper()
decrypt(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 14 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __UpperCAmelCase ( __a : Optional[Any] ,__a : int ,__a : Any ) -> int:
"""simple docstring"""
_a : int = 0
if start < end:
_a : Tuple = randint(__a ,__a )
_a : Tuple = a[end]
_a : List[str] = a[pivot]
_a : Any = temp
_a , _a : Optional[int] = _in_place_partition(__a ,__a ,__a )
count += _in_place_quick_sort(__a ,__a ,p - 1 )
count += _in_place_quick_sort(__a ,p + 1 ,__a )
return count
def __UpperCAmelCase ( __a : List[Any] ,__a : Tuple ,__a : Dict ) -> Dict:
"""simple docstring"""
_a : Dict = 0
_a : Tuple = randint(__a ,__a )
_a : List[Any] = a[end]
_a : str = a[pivot]
_a : str = temp
_a : Dict = start - 1
for index in range(__a ,__a ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_a : int = new_pivot_index + 1
_a : Any = a[new_pivot_index]
_a : Optional[int] = a[index]
_a : str = temp
_a : Union[str, Any] = a[new_pivot_index + 1]
_a : Tuple = a[end]
_a : Any = temp
return new_pivot_index + 1, count
a__ = TemporaryFile()
a__ = 100 # 1000 elements are to be sorted
a__ , a__ = 0, 1 # mean and standard deviation
a__ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a__ = np.load(outfile)
a__ = len(M) - 1
a__ = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
| 14 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' ,[
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ,__a : List[Any] ,__a : Tuple ,__a : List[Any] ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Optional[Any] ,__a : Tuple ,__a : Dict ,__a : Union[str, Any] ,__a : List[Any] ,) -> Any:
"""simple docstring"""
_a : int = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
_a , _a : Tuple = input_paths_and_base_extractors[compression_format]
if input_path is None:
_a : Dict = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__a )
assert base_extractor.is_extractable(__a )
_a : Tuple = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(__a ,__a )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_a : Dict = file_path.read_text(encoding='''utf-8''' )
else:
_a : List[Any] = output_path.read_text(encoding='''utf-8''' )
_a : List[str] = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' ,[
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] ,)
def __UpperCAmelCase ( __a : Tuple ,__a : Any ,__a : int ,__a : Optional[int] ,__a : Dict ,__a : List[str] ,__a : Union[str, Any] ,__a : str ,__a : Dict ,__a : List[str] ,__a : Optional[Any] ,__a : Tuple ,) -> List[str]:
"""simple docstring"""
_a : int = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
_a : List[Any] = input_paths[compression_format]
if input_path is None:
_a : int = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__a )
_a : str = Extractor.infer_extractor_format(__a )
assert extractor_format is not None
_a : List[str] = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(__a ,__a ,__a )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_a : Any = file_path.read_text(encoding='''utf-8''' )
else:
_a : List[Any] = output_path.read_text(encoding='''utf-8''' )
_a : Tuple = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __UpperCAmelCase ( __a : Optional[Any] ,__a : Any ) -> Tuple:
"""simple docstring"""
import tarfile
_a : Tuple = tmp_path / '''data_dot_dot'''
directory.mkdir()
_a : Any = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(__a ,'''w''' ) as f:
f.add(__a ,arcname=os.path.join('''..''' ,text_file.name ) )
return path
@pytest.fixture
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
import tarfile
_a : Dict = tmp_path / '''data_sym_link'''
directory.mkdir()
_a : List[str] = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' ,directory / '''subdir''' ,target_is_directory=__a )
with tarfile.TarFile(__a ,'''w''' ) as f:
f.add(str(directory / '''subdir''' ) ,arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' ,[('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] ,)
def __UpperCAmelCase ( __a : str ,__a : Optional[Any] ,__a : Dict ,__a : List[Any] ,__a : List[str] ,__a : Dict ) -> List[str]:
"""simple docstring"""
_a : Union[str, Any] = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
_a : Optional[int] = insecure_tar_files[insecure_tar_file]
_a : Optional[Any] = tmp_path / '''extracted'''
TarExtractor.extract(__a ,__a )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __UpperCAmelCase ( __a : List[str] ) -> Optional[Any]:
"""simple docstring"""
_a : Any = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
_a : Union[str, Any] = (
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(__a )
assert zipfile.is_zipfile(str(__a ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__a ) # but we're right
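# Hedged aside on why `zipfile` is fooled above: zipfile.is_zipfile only looks
# for the end-of-central-directory magic b"PK\x05\x06" near the end of the
# data, which the crafted PNG bytes embed (see the CPython PR referenced above).
import io
import zipfile as stdlib_zipfile

assert stdlib_zipfile.is_zipfile(io.BytesIO(b'''junk''' + b'''PK\x05\x06''' + b'''\x00''' * 18 ) )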
| 14 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = MgpstrTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Any:
super().setUp()
# fmt: off
_a : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_a : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
def __lowercase ( self , **_a ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Tuple:
_a : List[str] = '''tester'''
_a : Optional[Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_a : Tuple = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_a : Tuple = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a , _a : int = self.get_input_output_texts(_a )
_a : List[str] = tokenizer.tokenize(_a )
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_a : int = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __lowercase ( self ) -> Optional[Any]:
pass
| 14 | 1 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
        '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
a__ = parser.parse_args()
a__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
_a : int = 0
def __lowercase ( self ) -> List[str]:
_a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : Optional[Any] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
_a : Any = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Dict = CLIPConfig()
            # Create a dummy config file with image_processor_type
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : List[str] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_a : Tuple = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_a : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Any:
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
_a : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowercase ( self ) -> List[Any]:
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : List[str] = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def __lowercase ( self ) -> Dict:
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_a : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowercase ( self ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_a : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
_a : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowercase ( self ) -> Dict:
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = Path(_a ) / '''preprocessor_config.json'''
_a : int = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : int = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowercase ( self ) -> Union[str, Any]:
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_a : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_a : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 14 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 14 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCAmelCase__ : float
UpperCAmelCase__ : TreeNode | None = None
UpperCAmelCase__ : TreeNode | None = None
def __UpperCAmelCase ( __a : TreeNode | None ) -> bool:
"""simple docstring"""
def is_valid_tree(__a : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(__a ,__a ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(__a ):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''' )
def is_binary_search_tree_recursive_check(
__a : TreeNode | None ,__a : float ,__a : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left ,__a ,node.data )
and is_binary_search_tree_recursive_check(
node.right ,node.data ,__a )
)
return is_binary_search_tree_recursive_check(__a ,-float('''inf''' ) ,float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 1 |
def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : Dict ) -> Optional[Any]:
"""simple docstring"""
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(__a ,n - 1 ,__a ) * a) % mod
else:
        _a : Optional[Any] = binary_exponentiation(__a ,n // 2 ,__a )
return (b * b) % mod
# a prime number
a__ = 701
a__ = 1000000000
a__ = 10
# using binary exponentiation function, O(log(p)):
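# By Fermat's little theorem, b**(p - 2) % p is the modular multiplicative
# inverse of b for prime p, so modular division (a / b) % p can be computed
# as (a * b**(p - 2)) % p.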
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 14 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
a__ = numpy.array([0, 0])
a__ = numpy.array([0.5, 0.8660254])
a__ = numpy.array([1, 0])
a__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def __UpperCAmelCase ( __a : list[numpy.ndarray] ,__a : int ) -> list[numpy.ndarray]:
"""simple docstring"""
_a : Tuple = initial_vectors
for _ in range(__a ):
_a : int = iteration_step(__a )
return vectors
def __UpperCAmelCase ( __a : list[numpy.ndarray] ) -> list[numpy.ndarray]:
"""simple docstring"""
_a : Tuple = []
for i, start_vector in enumerate(vectors[:-1] ):
_a : str = vectors[i + 1]
new_vectors.append(__a )
_a : Optional[int] = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 ,60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def __UpperCAmelCase ( __a : numpy.ndarray ,__a : float ) -> numpy.ndarray:
"""simple docstring"""
_a : Tuple = numpy.radians(__a )
_a , _a : List[Any] = numpy.cos(__a ), numpy.sin(__a )
_a : Dict = numpy.array(((c, -s), (s, c)) )
return numpy.dot(__a ,__a )
def __UpperCAmelCase ( __a : list[numpy.ndarray] ) -> None:
"""simple docstring"""
_a : str = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_a , _a : Optional[int] = zip(*__a )
plt.plot(__a ,__a )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 14 | 1 |
def __UpperCAmelCase ( __a : int = 1_000 ) -> int:
"""simple docstring"""
_a , _a : Union[str, Any] = 1, 1
_a : Any = []
for i in range(1 ,n + 1 ):
_a : Optional[int] = prev_numerator + 2 * prev_denominator
_a : Union[str, Any] = prev_numerator + prev_denominator
if len(str(__a ) ) > len(str(__a ) ):
result.append(__a )
_a : List[Any] = numerator
_a : List[Any] = denominator
return len(__a )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 14 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase ( __a : Tuple ,__a : Dict ,__a : List[str] ,__a : Optional[Any] ,__a : Tuple ) -> Dict:
"""simple docstring"""
with open(__a ) as metadata_file:
_a : Optional[Any] = json.load(__a )
_a : List[Any] = LukeConfig(use_entity_aware_attention=__a ,**metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_a : Optional[Any] = torch.load(__a ,map_location='''cpu''' )['''module''']
# Load the entity vocab file
_a : Any = load_original_entity_vocab(__a )
# add an entry for [MASK2]
_a : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_a : Dict = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_a : Optional[int] = AddedToken('''<ent>''' ,lstrip=__a ,rstrip=__a )
_a : Tuple = AddedToken('''<ent2>''' ,lstrip=__a ,rstrip=__a )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__a )
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''r''' ) as f:
_a : List[str] = json.load(__a )
_a : Tuple = '''MLukeTokenizer'''
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''w''' ) as f:
json.dump(__a ,__a )
with open(os.path.join(__a ,MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) ,'''w''' ) as f:
json.dump(__a ,__a )
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
# Initialize the embeddings of the special tokens
_a : str = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_a : Tuple = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_a : Any = state_dict['''embeddings.word_embeddings.weight''']
_a : Optional[int] = word_emb[ent_init_index].unsqueeze(0 )
_a : Any = word_emb[enta_init_index].unsqueeze(0 )
_a : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_a : Tuple = state_dict[bias_name]
_a : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_a : Optional[int] = decoder_bias[enta_init_index].unsqueeze(0 )
_a : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_a : Tuple = F"""encoder.layer.{layer_index}.attention.self."""
_a : List[Any] = state_dict[prefix + matrix_name]
_a : Dict = state_dict[prefix + matrix_name]
_a : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_a : Union[str, Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
_a : Optional[int] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_a : int = state_dict['''entity_predictions.bias''']
_a : int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_a : Optional[int] = LukeForMaskedLM(config=__a ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_a : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_a : Optional[int] = state_dict[key]
else:
_a : Tuple = state_dict[key]
_a , _a : int = model.load_state_dict(__a ,strict=__a )
if set(__a ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a ,task='''entity_classification''' )
_a : int = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_a : List[Any] = (0, 9)
_a : Tuple = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : int = model(**__a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : List[str] = torch.Size((1, 33, 768) )
_a : Union[str, Any] = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : str = torch.Size((1, 1, 768) )
_a : List[Any] = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
_a : Dict = '''Tokyo is the capital of <mask>.'''
_a : List[str] = (24, 30)
_a : Optional[int] = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : Optional[Any] = model(**__a )
_a : Any = encoding['''input_ids'''][0].tolist()
_a : Optional[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_a : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__a )
_a : Any = outputs.entity_logits[0][0].argmax().item()
_a : Optional[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__a ) )
model.save_pretrained(__a )
def __UpperCAmelCase ( __a : List[Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
_a : int = [json.loads(__a ) for line in open(__a )]
_a : List[Any] = {}
for entry in data:
_a : int = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_a : List[Any] = entity_id
break
_a : Dict = F"""{language}:{entity_name}"""
_a : int = entity_id
return new_mapping
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
a__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 14 | 1 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
a__ = TypeVar('''KEY''')
a__ = TypeVar('''VAL''')
@dataclass(frozen=__lowercase , slots=__lowercase )
class UpperCAmelCase_ ( Generic[KEY, VAL] ):
"""simple docstring"""
UpperCAmelCase__ : KEY
UpperCAmelCase__ : VAL
class UpperCAmelCase_ ( _Item ):
"""simple docstring"""
def __init__( self ) -> None:
super().__init__(_a , _a )
def __bool__( self ) -> bool:
return False
a__ = _DeletedItem()
class UpperCAmelCase_ ( MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self , _a = 8 , _a = 0.75 ) -> None:
_a : Tuple = initial_block_size
_a : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_a : Tuple = capacity_factor
_a : Optional[Any] = 0
def __lowercase ( self , _a ) -> int:
return hash(_a ) % len(self._buckets )
def __lowercase ( self , _a ) -> int:
return (ind + 1) % len(self._buckets )
def __lowercase ( self , _a , _a , _a ) -> bool:
_a : int = self._buckets[ind]
if not stored:
_a : Dict = _Item(_a , _a )
self._len += 1
return True
elif stored.key == key:
_a : int = _Item(_a , _a )
return True
else:
return False
def __lowercase ( self ) -> bool:
_a : Optional[int] = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(_a )
def __lowercase ( self ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_a : Optional[Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __lowercase ( self , _a ) -> None:
_a : Optional[Any] = self._buckets
_a : Dict = [None] * new_size
_a : int = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __lowercase ( self ) -> None:
self._resize(len(self._buckets ) * 2 )
def __lowercase ( self ) -> None:
self._resize(len(self._buckets ) // 2 )
def __lowercase ( self , _a ) -> Iterator[int]:
_a : Union[str, Any] = self._get_bucket_index(_a )
for _ in range(len(self._buckets ) ):
yield ind
_a : Dict = self._get_next_ind(_a )
def __lowercase ( self , _a , _a ) -> None:
for ind in self._iterate_buckets(_a ):
if self._try_set(_a , _a , _a ):
break
def __setitem__( self , _a , _a ) -> None:
if self._is_full():
self._size_up()
self._add_item(_a , _a )
def __delitem__( self , _a ) -> None:
for ind in self._iterate_buckets(_a ):
_a : List[Any] = self._buckets[ind]
if item is None:
raise KeyError(_a )
if item is _deleted:
continue
if item.key == key:
_a : Any = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , _a ) -> VAL:
for ind in self._iterate_buckets(_a ):
_a : int = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(_a )
def __len__( self ) -> int:
return self._len
def __iter__( self ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
_a : str = ''' ,'''.join(
F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})"""
| 14 |
from scipy.stats import spearmanr
import datasets
a__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
a__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: returned only if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
a__ = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
def __lowercase ( self , _a , _a , _a=False ) -> str:
_a : int = spearmanr(_a , _a )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 14 | 1 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
a__ = logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , **_a ) -> Optional[Any]:
requires_backends(self , ['''bs4'''] )
super().__init__(**_a )
def __lowercase ( self , _a ) -> int:
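        # Build an XPath for `element`: walk up through its parents, recording
        # each ancestor's tag name and its 1-based index among same-tag
        # siblings (0 when it is the only such child).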
_a : Optional[Any] = []
_a : Optional[Any] = []
_a : Optional[Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
_a : Optional[int] = parent.find_all(child.name , recursive=_a )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_a ) else next(i for i, s in enumerate(_a , 1 ) if s is child ) )
_a : Tuple = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def __lowercase ( self , _a ) -> List[Any]:
_a : List[Any] = BeautifulSoup(_a , '''html.parser''' )
_a : List[str] = []
_a : Optional[Any] = []
_a : Tuple = []
for element in html_code.descendants:
if type(_a ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
_a : Tuple = html.unescape(_a ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_a )
_a , _a : Tuple = self.xpath_soup(_a )
stringaxtag_seq.append(_a )
stringaxsubs_seq.append(_a )
if len(_a ) != len(_a ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(_a ) != len(_a ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def __lowercase ( self , _a , _a ) -> Dict:
_a : Any = ''''''
for tagname, subs in zip(_a , _a ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self , _a ) -> BatchFeature:
_a : Any = False
        # Check that the strings have a valid type
if isinstance(_a , _a ):
_a : Any = True
elif isinstance(_a , (list, tuple) ):
if len(_a ) == 0 or isinstance(html_strings[0] , _a ):
_a : Optional[int] = True
if not valid_strings:
raise ValueError(
                '''HTML strings must be of type `str`, `List[str]` (batch of examples), '''
F"""but is of type {type(_a )}.""" )
_a : Any = bool(isinstance(_a , (list, tuple) ) and (isinstance(html_strings[0] , _a )) )
if not is_batched:
_a : Optional[Any] = [html_strings]
# Get nodes + xpaths
_a : List[str] = []
_a : Union[str, Any] = []
for html_string in html_strings:
_a , _a , _a : Tuple = self.get_three_from_single(_a )
nodes.append(_a )
_a : Union[str, Any] = []
for node, tag_list, sub_list in zip(_a , _a , _a ):
_a : Any = self.construct_xpath(_a , _a )
xpath_strings.append(_a )
xpaths.append(_a )
# return as Dict
_a : Tuple = {'''nodes''': nodes, '''xpaths''': xpaths}
_a : int = BatchFeature(data=_a , tensor_type=_a )
return encoded_inputs
| 14 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __UpperCAmelCase ( __a : bytes ,__a : int ) -> np.array:
"""simple docstring"""
_a : int = F"""{sampling_rate}"""
_a : str = '''1'''
_a : Optional[int] = '''f32le'''
_a : Optional[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(__a ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
_a : Any = ffmpeg_process.communicate(__a )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
_a : Optional[Any] = output_stream[0]
_a : Optional[int] = np.frombuffer(__a ,np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
def __UpperCAmelCase ( __a : int ,__a : float ,__a : str = "f32le" ,) -> str:
"""simple docstring"""
_a : Dict = F"""{sampling_rate}"""
_a : Optional[Any] = '''1'''
if format_for_conversion == "s16le":
_a : Dict = 2
elif format_for_conversion == "f32le":
_a : Optional[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Dict = platform.system()
if system == "Linux":
_a : Dict = '''alsa'''
_a : Union[str, Any] = '''default'''
elif system == "Darwin":
_a : Union[str, Any] = '''avfoundation'''
_a : List[str] = ''':0'''
elif system == "Windows":
_a : Optional[int] = '''dshow'''
_a : str = '''default'''
_a : Tuple = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : str = _ffmpeg_stream(__a ,__a )
for item in iterator:
yield item
def __UpperCAmelCase ( __a : int ,__a : float ,__a : Optional[int] = None ,__a : Optional[Union[Tuple[float, float], float]] = None ,__a : str = "f32le" ,) -> Optional[int]:
"""simple docstring"""
if stream_chunk_s is not None:
_a : Tuple = stream_chunk_s
else:
_a : Tuple = chunk_length_s
_a : Tuple = ffmpeg_microphone(__a ,__a ,format_for_conversion=__a )
if format_for_conversion == "s16le":
_a : Any = np.intaa
_a : Optional[int] = 2
elif format_for_conversion == "f32le":
_a : Dict = np.floataa
_a : List[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_a : List[Any] = chunk_length_s / 6
_a : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__a ,(int, float) ):
_a : Optional[Any] = [stride_length_s, stride_length_s]
_a : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_a : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_a : Optional[Any] = datetime.datetime.now()
_a : Tuple = datetime.timedelta(seconds=__a )
for item in chunk_bytes_iter(__a ,__a ,stride=(stride_left, stride_right) ,stream=__a ):
# Put everything back in numpy scale
_a : Dict = np.frombuffer(item['''raw'''] ,dtype=__a )
_a : Dict = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
_a : str = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __UpperCAmelCase ( __a : Optional[int] ,__a : int ,__a : Tuple[int, int] ,__a : bool = False ) -> Optional[int]:
"""simple docstring"""
_a : Any = b''''''
_a , _a : List[str] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_a : List[str] = 0
for raw in iterator:
acc += raw
if stream and len(__a ) < chunk_len:
_a : Dict = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__a ) >= chunk_len:
# We are flushing the accumulator
_a : List[str] = (_stride_left, stride_right)
_a : List[Any] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
_a : List[Any] = False
yield item
_a : Optional[Any] = stride_left
_a : Optional[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__a ) > stride_left:
_a : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
_a : Dict = False
yield item
def __UpperCAmelCase ( __a : int ,__a : int ) -> Tuple:
"""simple docstring"""
    _a : Dict = 2**24 # 16 MiB
try:
with subprocess.Popen(__a ,stdout=subprocess.PIPE ,bufsize=__a ) as ffmpeg_process:
while True:
_a : int = ffmpeg_process.stdout.read(__a )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 14 | 1 |
def __UpperCAmelCase ( __a : str ) -> str:
"""simple docstring"""
if not all(char in '''01''' for char in bin_string ):
raise ValueError('''Non-binary value was passed to the function''' )
if not bin_string:
raise ValueError('''Empty string was passed to the function''' )
_a : Union[str, Any] = ''''''
while len(__a ) % 3 != 0:
_a : Tuple = '''0''' + bin_string
_a : int = [
bin_string[index : index + 3]
for index in range(len(__a ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
_a : str = 0
for index, val in enumerate(__a ):
oct_val += int(2 ** (2 - index) * int(__a ) )
oct_string += str(__a )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 14 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = KandinskyInpaintPipeline
UpperCAmelCase__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase__ : Optional[int] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Any = False
@property
def __lowercase ( self ) -> Optional[int]:
return 3_2
@property
def __lowercase ( self ) -> int:
return 3_2
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def __lowercase ( self ) -> Optional[Any]:
return 1_0_0
@property
def __lowercase ( self ) -> Optional[Any]:
_a : Any = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a : Optional[int] = MultilingualCLIP(_a )
_a : Tuple = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[str] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_a : Dict = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self ) -> Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ) -> Any:
_a : List[Any] = self.dummy_text_encoder
_a : Optional[Any] = self.dummy_tokenizer
_a : Optional[Any] = self.dummy_unet
_a : Union[str, Any] = self.dummy_movq
_a : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
_a : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowercase ( self , _a , _a=0 ) -> int:
_a : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_a : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_a : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_a ) ).to(_a )
_a : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a : Optional[int] = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
_a : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.floataa )
_a : List[str] = 0
if str(_a ).startswith('''mps''' ):
_a : Tuple = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
_a : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[Any] = '''cpu'''
_a : List[Any] = self.get_dummy_components()
_a : Tuple = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Any = pipe(**self.get_dummy_inputs(_a ) )
_a : str = output.images
_a : Tuple = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
_a : str = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowercase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
_a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_a : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
_a : Any = 0
_a : Optional[Any] = '''a hat'''
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_a : Tuple = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
_a : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_a , _a : Dict = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_a : Optional[int] = pipeline(
_a , image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
_a : Optional[int] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a , _a )
| 14 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "funnel"
UpperCAmelCase__ : Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self , _a=3_0_5_2_2 , _a=[4, 4, 4] , _a=None , _a=2 , _a=7_6_8 , _a=1_2 , _a=6_4 , _a=3_0_7_2 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1e-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ) -> List[Any]:
_a : Optional[int] = vocab_size
_a : Dict = block_sizes
_a : Optional[int] = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_a : int = num_decoder_layers
_a : List[str] = d_model
_a : Optional[Any] = n_head
_a : Tuple = d_head
_a : Dict = d_inner
_a : List[str] = hidden_act
_a : int = hidden_dropout
_a : Union[str, Any] = attention_dropout
_a : Tuple = activation_dropout
_a : Optional[Any] = initializer_range
_a : Dict = initializer_std
_a : Union[str, Any] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
_a : Any = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
_a : Optional[Any] = attention_type
_a : int = separate_cls
_a : Tuple = truncate_seq
_a : List[Any] = pool_q_only
super().__init__(**_a )
@property
def __lowercase ( self ) -> Tuple:
return sum(self.block_sizes )
@num_hidden_layers.setter
def __lowercase ( self , _a ) -> List[str]:
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def __lowercase ( self ) -> Optional[int]:
return len(self.block_sizes )
@num_blocks.setter
def __lowercase ( self , _a ) -> Dict:
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 14 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
        help='''The number of input channels. If `None`, the number of input channels will be inferred automatically.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
a__ = parser.parse_args()
a__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = "convbert"
def __init__( self , _a=3_0_5_2_2 , _a=7_6_8 , _a=1_2 , _a=1_2 , _a=3_0_7_2 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=2 , _a=0.02 , _a=1e-1_2 , _a=1 , _a=0 , _a=2 , _a=7_6_8 , _a=2 , _a=9 , _a=1 , _a=None , **_a , ) -> Tuple:
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a , )
_a : Optional[int] = vocab_size
_a : str = hidden_size
_a : str = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : int = intermediate_size
_a : List[str] = hidden_act
_a : Union[str, Any] = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : int = max_position_embeddings
_a : str = type_vocab_size
_a : Union[str, Any] = initializer_range
_a : Union[str, Any] = layer_norm_eps
_a : str = embedding_size
_a : Tuple = head_ratio
_a : Tuple = conv_kernel_size
_a : Union[str, Any] = num_groups
_a : Dict = classifier_dropout
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_a : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_a : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 14 |
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a , _a ) -> List[str]:
_a : List[Any] = name
_a : List[str] = value
_a : List[str] = weight
def __repr__( self ) -> Optional[int]:
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def __lowercase ( self ) -> List[Any]:
return self.value
def __lowercase ( self ) -> int:
return self.name
def __lowercase ( self ) -> Optional[int]:
return self.weight
def __lowercase ( self ) -> Optional[Any]:
return self.value / self.weight
def __UpperCAmelCase ( __a : Optional[int] ,__a : Tuple ,__a : List[str] ) -> List[str]:
"""simple docstring"""
_a : Optional[int] = []
for i in range(len(__a ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __UpperCAmelCase ( __a : int ,__a : Union[str, Any] ,__a : int ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = sorted(__a ,key=__a ,reverse=__a )
_a : Any = []
_a , _a : Optional[int] = 0.0, 0.0
for i in range(len(__a ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 1 |
from __future__ import annotations
a__ = list[list[int]]
# assigning initial values to the grid
a__ = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a__ = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def __UpperCAmelCase ( __a : Matrix ,__a : int ,__a : int ,__a : int ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def __UpperCAmelCase ( __a : Matrix ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def __UpperCAmelCase ( __a : Matrix ) -> Matrix | None:
"""simple docstring"""
if location := find_empty_location(__a ):
_a , _a : Optional[Any] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 ,10 ):
if is_safe(__a ,__a ,__a ,__a ):
_a : List[str] = digit
if sudoku(__a ) is not None:
return grid
_a : Optional[int] = 0
return None
def __UpperCAmelCase ( __a : Matrix ) -> None:
"""simple docstring"""
for row in grid:
for cell in row:
print(__a ,end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
a__ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
| 14 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3 , _a=True , _a=True , _a=0.1 , _a=0.1 , _a=2_2_4 , _a=1_0_0_0 , _a=[3, 3, 6, 4] , _a=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Tuple:
_a : Dict = parent
_a : Optional[int] = batch_size
_a : Optional[Any] = num_channels
_a : Union[str, Any] = is_training
_a : Tuple = use_labels
_a : Dict = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Dict = num_labels
_a : List[str] = image_size
_a : Dict = layer_depths
_a : str = embed_dims
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
if self.use_labels:
_a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : Dict = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> int:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_a , layer_scale_init_value=1e-5 , )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : List[Any] = SwiftFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __lowercase ( self , _a , _a , _a ) -> Optional[Any]:
_a : List[str] = self.num_labels
_a : Optional[int] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_a : Union[str, Any] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Tuple:
((_a) , (_a) , (_a)) : Optional[int] = self.prepare_config_and_inputs()
_a : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[int] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = SwiftFormerModelTester(self )
_a : int = ConfigTester(
self , config_class=_a , has_text_modality=_a , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def __lowercase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
def __lowercase ( self ) -> Dict:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __lowercase ( self ) -> str:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> int:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = SwiftFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> int:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[Any] = outputs.hidden_states
_a : Union[str, Any] = 8
self.assertEqual(len(_a ) , _a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> str:
def _config_zero_init(_a ):
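# Set every init-range/std/initializer-factor/layer-scale hyperparameter to ~0
# (1e-10), recursing into nested sub-configs, so the assertion below can check
# that properly initialized parameters end up with mean 0.0 or 1.0.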
_a : List[Any] = copy.deepcopy(_a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_a , _a , 1e-1_0 )
if isinstance(getattr(_a , _a , _a ) , _a ):
_a : int = _config_zero_init(getattr(_a , _a ) )
setattr(_a , _a , _a )
return configs_no_init
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Dict:
_a : Any = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_a )
_a : Any = self.default_image_processor
_a : Any = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[Any] = model(**_a )
# verify the logits
_a : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : int = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 14 | 1 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __UpperCAmelCase ( __a : int ,__a : Dict=1 ) -> str:
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=0 ) -> Optional[Any]:
"""simple docstring"""
_a : Dict = []
for old_item in old_list:
_a : Optional[Any] = old_item.replace('''in_layers.0''' ,'''norm1''' )
_a : int = new_item.replace('''in_layers.2''' ,'''conv1''' )
_a : List[Any] = new_item.replace('''out_layers.0''' ,'''norm2''' )
_a : int = new_item.replace('''out_layers.3''' ,'''conv2''' )
_a : Optional[Any] = new_item.replace('''emb_layers.1''' ,'''time_emb_proj''' )
_a : int = new_item.replace('''skip_connection''' ,'''conv_shortcut''' )
_a : Optional[int] = shave_segments(__a ,n_shave_prefix_segments=__a )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def __UpperCAmelCase ( __a : str ,__a : Tuple=0 ) -> Dict:
"""simple docstring"""
_a : int = []
for old_item in old_list:
_a : Union[str, Any] = old_item
_a : str = new_item.replace('''norm.weight''' ,'''group_norm.weight''' )
_a : str = new_item.replace('''norm.bias''' ,'''group_norm.bias''' )
_a : Optional[int] = new_item.replace('''proj_out.weight''' ,'''proj_attn.weight''' )
_a : List[str] = new_item.replace('''proj_out.bias''' ,'''proj_attn.bias''' )
_a : Union[str, Any] = shave_segments(__a ,n_shave_prefix_segments=__a )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def __UpperCAmelCase ( __a : Tuple ,__a : Dict ,__a : Dict ,__a : List[str]=None ,__a : Any=None ,__a : List[str]=None ) -> Any:
"""simple docstring"""
assert isinstance(__a ,__a ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_a : Any = old_checkpoint[path]
_a : Any = old_tensor.shape[0] // 3
_a : str = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_a : Tuple = old_tensor.shape[0] // config['''num_head_channels'''] // 3
_a : List[Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_a , _a , _a : Optional[Any] = old_tensor.split(channels // num_heads ,dim=1 )
_a : List[str] = query.reshape(__a )
_a : List[str] = key.reshape(__a )
_a : Dict = value.reshape(__a )
for path in paths:
_a : Optional[int] = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_a : Optional[int] = new_path.replace('''middle_block.0''' ,'''mid_block.resnets.0''' )
_a : List[Any] = new_path.replace('''middle_block.1''' ,'''mid_block.attentions.0''' )
_a : Tuple = new_path.replace('''middle_block.2''' ,'''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
_a : Dict = new_path.replace(replacement['''old'''] ,replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_a : Any = old_checkpoint[path['''old''']][:, :, 0]
else:
_a : str = old_checkpoint[path['''old''']]
def __UpperCAmelCase ( __a : List[Any] ,__a : Dict ) -> Tuple:
"""simple docstring"""
_a : List[Any] = {}
_a : Tuple = checkpoint['''time_embed.0.weight''']
_a : str = checkpoint['''time_embed.0.bias''']
_a : Dict = checkpoint['''time_embed.2.weight''']
_a : int = checkpoint['''time_embed.2.bias''']
_a : Dict = checkpoint['''input_blocks.0.0.weight''']
_a : List[Any] = checkpoint['''input_blocks.0.0.bias''']
_a : Tuple = checkpoint['''out.0.weight''']
_a : str = checkpoint['''out.0.bias''']
_a : str = checkpoint['''out.2.weight''']
_a : int = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
_a : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
_a : List[Any] = {
layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
for layer_id in range(__a )
}
# Retrieves the keys for the middle blocks only
_a : int = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
_a : str = {
layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
for layer_id in range(__a )
}
# Retrieves the keys for the output blocks only
_a : Tuple = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
_a : Optional[Any] = {
layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
for layer_id in range(__a )
}
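# input_blocks.0 is the stem convolution already handled above, hence the range
# starts at 1; each resolution level holds num_res_blocks resnets plus one downsampler.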
for i in range(1 ,__a ):
_a : Any = (i - 1) // (config['''num_res_blocks'''] + 1)
_a : List[Any] = (i - 1) % (config['''num_res_blocks'''] + 1)
_a : str = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
_a : List[str] = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]
if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
_a : Optional[Any] = checkpoint[
F"""input_blocks.{i}.0.op.weight"""
]
_a : Any = checkpoint[
F"""input_blocks.{i}.0.op.bias"""
]
continue
_a : List[Any] = renew_resnet_paths(__a )
_a : Any = {'''old''': F"""input_blocks.{i}.0""", '''new''': F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_a : Optional[int] = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
__a ,__a ,__a ,additional_replacements=[meta_path, resnet_op] ,config=__a )
if len(__a ):
_a : Union[str, Any] = renew_attention_paths(__a )
_a : List[Any] = {
'''old''': F"""input_blocks.{i}.1""",
'''new''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : Union[str, Any] = {
F"""input_blocks.{i}.1.qkv.bias""": {
'''key''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""input_blocks.{i}.1.qkv.weight""": {
'''key''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
__a ,__a ,__a ,additional_replacements=[meta_path] ,attention_paths_to_split=__a ,config=__a ,)
_a : int = middle_blocks[0]
_a : str = middle_blocks[1]
_a : int = middle_blocks[2]
_a : int = renew_resnet_paths(__a )
assign_to_checkpoint(__a ,__a ,__a ,config=__a )
_a : Dict = renew_resnet_paths(__a )
assign_to_checkpoint(__a ,__a ,__a ,config=__a )
_a : str = renew_attention_paths(__a )
_a : Dict = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
__a ,__a ,__a ,attention_paths_to_split=__a ,config=__a )
for i in range(__a ):
_a : str = i // (config['''num_res_blocks'''] + 1)
_a : Union[str, Any] = i % (config['''num_res_blocks'''] + 1)
_a : List[Any] = [shave_segments(__a ,2 ) for name in output_blocks[i]]
_a : str = {}
for layer in output_block_layers:
_a , _a : Optional[Any] = layer.split('''.''' )[0], shave_segments(__a ,1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__a )
else:
_a : int = [layer_name]
if len(__a ) > 1:
_a : Dict = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
_a : List[Any] = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]
_a : Tuple = renew_resnet_paths(__a )
_a : Tuple = renew_resnet_paths(__a )
_a : str = {'''old''': F"""output_blocks.{i}.0""", '''new''': F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(__a ,__a ,__a ,additional_replacements=[meta_path] ,config=__a )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a : List[Any] = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
_a : List[str] = checkpoint[
F"""output_blocks.{i}.{index}.conv.weight"""
]
_a : Union[str, Any] = checkpoint[
F"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(__a ) == 2:
_a : Tuple = []
if len(__a ):
_a : Optional[int] = renew_attention_paths(__a )
_a : str = {
'''old''': F"""output_blocks.{i}.1""",
'''new''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : Tuple = {
F"""output_blocks.{i}.1.qkv.bias""": {
'''key''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""output_blocks.{i}.1.qkv.weight""": {
'''key''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
__a ,__a ,__a ,additional_replacements=[meta_path] ,attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None ,config=__a ,)
else:
_a : str = renew_resnet_paths(__a ,n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a : Any = '''.'''.join(['''output_blocks''', str(__a ), path['''old''']] )
_a : Tuple = '''.'''.join(['''up_blocks''', str(__a ), '''resnets''', str(__a ), path['''new''']] )
_a : Optional[int] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
a__ = parser.parse_args()
a__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
a__ = json.loads(f.read())
a__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
a__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
a__ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
a__ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
a__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 14 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : str ) -> List[Any]:
"""simple docstring"""
_a : Tuple = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
_a : Dict = MaskFormerConfig(backbone_config=__a )
_a : Optional[Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
_a : Optional[Any] = 847
_a : List[Any] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
_a : Union[str, Any] = 150
_a : Any = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
_a : int = 171
_a : List[str] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
_a : Dict = 133
_a : Optional[Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
_a : List[Any] = 19
_a : Optional[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
_a : List[Any] = 65
_a : Dict = '''mapillary-vistas-id2label.json'''
_a : Optional[int] = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
_a : Tuple = {int(__a ): v for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
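# The original decoder numbers its adapters/layers 3..1 while the HF FPN layers run 0..2,
# so the zip below pairs the two numberings in reverse order.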
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : str = dct.pop(__a )
_a : str = val
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_a : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_a : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[int] = in_proj_weight[:dim, :]
_a : List[Any] = in_proj_bias[: dim]
_a : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
_a : Tuple = in_proj_bias[
dim : dim * 2
]
_a : int = in_proj_weight[
-dim :, :
]
_a : Optional[int] = in_proj_bias[-dim :]
# fmt: on
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Union[str, Any] = in_proj_weight[: hidden_size, :]
_a : List[Any] = in_proj_bias[: hidden_size]
_a : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Any = in_proj_bias[hidden_size : hidden_size * 2]
_a : Tuple = in_proj_weight[-hidden_size :, :]
_a : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_a : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[Any] = in_proj_weight[: hidden_size, :]
_a : Any = in_proj_bias[: hidden_size]
_a : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_a : List[str] = in_proj_weight[-hidden_size :, :]
_a : int = in_proj_bias[-hidden_size :]
def __UpperCAmelCase ( ) -> torch.Tensor:
"""simple docstring"""
_a : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : Dict = Image.open(requests.get(__a ,stream=__a ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __a : str ,__a : str ,__a : str ,__a : bool = False ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[Any] = get_maskformer_config(__a )
# load original state_dict
with open(__a ,'''rb''' ) as f:
_a : str = pickle.load(__a )
_a : Union[str, Any] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_a : Any = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a ,__a ,__a )
read_in_swin_q_k_v(__a ,config.backbone_config )
read_in_decoder_q_k_v(__a ,__a )
# update to torch tensors
for key, value in state_dict.items():
_a : Optional[int] = torch.from_numpy(__a )
# load 🤗 model
_a : Dict = MaskFormerForInstanceSegmentation(__a )
model.eval()
for name, param in model.named_parameters():
print(__a ,param.shape )
_a , _a : Tuple = model.load_state_dict(__a ,strict=__a )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__a ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_a : Union[str, Any] = prepare_img()
if "vistas" in model_name:
_a : int = 65
elif "cityscapes" in model_name:
_a : Tuple = 65_535
else:
_a : str = 255
_a : Dict = True if '''ade''' in model_name else False
_a : Optional[Any] = MaskFormerImageProcessor(ignore_index=__a ,reduce_labels=__a )
_a : Optional[Any] = image_processor(__a ,return_tensors='''pt''' )
_a : int = model(**__a )
print('''Logits:''' ,outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_a : Union[str, Any] = torch.tensor(
[[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__a ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 14 | 1 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
a__ = TypeVar('''T''')
a__ = Union[List[T], Tuple[T, ...]]
a__ = Union[T, List[T], Dict[str, T]]
a__ = Union[str, bytes, os.PathLike]
| 14 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = XLMProphetNetTokenizer
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[Any] = True
def __lowercase ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] = XLMProphetNetTokenizer(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Tuple = '''[PAD]'''
_a : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __lowercase ( self ) -> str:
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_a ) , 1_0_1_2 )
def __lowercase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def __lowercase ( self ) -> str:
_a : Tuple = XLMProphetNetTokenizer(_a , keep_accents=_a )
_a : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
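# SentencePiece ids are shifted by ``fairseq_offset`` to leave room for the
# fairseq-style special tokens at the start of the vocabulary.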
_a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_a : List[Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
_a : List[str] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def __lowercase ( self ) -> List[str]:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def __lowercase ( self ) -> Tuple:
_a : str = '''Hello World!'''
_a : Tuple = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __lowercase ( self ) -> str:
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 14 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> Tuple:
_a : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_a , '''num_attention_heads''' ) )
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=6_4 , _a=3 , _a=3 , _a=2 , _a=1 , _a=1_6 , _a=[1_2_8, 2_5_6, 3_8_4] , _a=[4, 6, 8] , _a=[2, 3, 4] , _a=[1_6, 1_6, 1_6] , _a=0 , _a=[2, 2, 2] , _a=[2, 2, 2] , _a=0.02 , _a=True , _a=True , _a=2 , ) -> Tuple:
_a : Optional[int] = parent
_a : Optional[Any] = batch_size
_a : Any = image_size
_a : int = num_channels
_a : str = kernel_size
_a : Dict = stride
_a : List[Any] = padding
_a : Union[str, Any] = hidden_sizes
_a : str = num_attention_heads
_a : Optional[int] = depths
_a : Any = key_dim
_a : Any = drop_path_rate
_a : List[str] = patch_size
_a : str = attention_ratio
_a : Union[str, Any] = mlp_ratio
_a : Any = initializer_range
_a : Any = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
_a : Any = is_training
_a : Tuple = use_labels
_a : str = num_labels
_a : List[Any] = initializer_range
def __lowercase ( self ) -> List[str]:
_a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] , self.num_labels )
_a : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> Optional[Any]:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __lowercase ( self , _a , _a , _a ) -> Optional[Any]:
_a : str = LevitModel(config=_a )
model.to(_a )
model.eval()
_a : str = model(_a )
_a : List[Any] = (self.image_size, self.image_size)
_a , _a : Optional[int] = image_size[0], image_size[1]
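# The tester models Levit's patch embedding as four identical convolutions, so the
# standard conv output-size formula is applied four times below.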
for _ in range(4 ):
_a : Dict = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
_a : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : Optional[Any] = self.num_labels
_a : List[Any] = LevitForImageClassification(_a )
model.to(_a )
model.eval()
_a : Tuple = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Dict:
_a : List[str] = self.prepare_config_and_inputs()
_a , _a , _a : int = config_and_inputs
_a : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
UpperCAmelCase__ : int = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Optional[int]:
_a : str = LevitModelTester(self )
_a : Optional[int] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=3_7 )
def __lowercase ( self ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self ) -> int:
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def __lowercase ( self ) -> int:
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def __lowercase ( self ) -> List[str]:
pass
def __lowercase ( self ) -> Tuple:
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(_a )
_a : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[int] = [*signature.parameters.keys()]
_a : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> Tuple:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Optional[Any] = model(**self._prepare_for_class(_a , _a ) )
_a : str = outputs.hidden_states
_a : List[Any] = len(self.model_tester.depths ) + 1
self.assertEqual(len(_a ) , _a )
_a : Union[str, Any] = (self.model_tester.image_size, self.model_tester.image_size)
_a , _a : List[Any] = image_size[0], image_size[1]
for _ in range(4 ):
_a : Dict = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
_a : int = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Tuple = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Tuple:
pass
def __lowercase ( self , _a , _a , _a=False ) -> str:
_a : List[Any] = super()._prepare_for_class(_a , _a , return_labels=_a )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowercase ( self ) -> Optional[int]:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> str:
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __lowercase ( self ) -> Tuple:
if not self.model_tester.is_training:
return
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
_a : Union[str, Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_a )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
_a : int = model_class(_a )
model.to(_a )
model.train()
_a : List[str] = self._prepare_for_class(_a , _a , return_labels=_a )
_a : Optional[int] = model(**_a ).loss
loss.backward()
def __lowercase ( self ) -> Optional[int]:
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_a : Any = False
_a : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_a ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
_a : Dict = model_class(_a )
model.gradient_checkpointing_enable()
model.to(_a )
model.train()
_a : Dict = self._prepare_for_class(_a , _a , return_labels=_a )
_a : Optional[Any] = model(**_a ).loss
loss.backward()
def __lowercase ( self ) -> Optional[Any]:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : int = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_a ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
_a : Optional[int] = problem_type['''title''']
_a : Optional[Any] = problem_type['''num_labels''']
_a : Optional[Any] = model_class(_a )
model.to(_a )
model.train()
_a : List[Any] = self._prepare_for_class(_a , _a , return_labels=_a )
if problem_type["num_labels"] > 1:
_a : int = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
_a : Union[str, Any] = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_a ) as warning_list:
_a : List[Any] = model(**_a ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def __lowercase ( self ) -> Any:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : int = LevitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
_a : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> List[Any]:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __lowercase ( self ) -> Dict:
_a : List[str] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_a )
_a : int = self.default_image_processor
_a : int = prepare_img()
_a : List[str] = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Union[str, Any] = model(**_a )
# verify the logits
_a : int = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : List[Any] = torch.tensor([1.0448, -0.3745, -1.8317] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 14 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = LxmertTokenizer
UpperCAmelCase__ : Optional[Any] = LxmertTokenizerFast
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Dict = True
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
_a : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowercase ( self , _a ) -> List[str]:
_a : Tuple = '''UNwant\u00E9d,running'''
_a : str = '''unwanted, running'''
return input_text, output_text
def __lowercase ( self ) -> List[Any]:
_a : str = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 1_0, 8, 9] )
def __lowercase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
_a : Optional[Any] = self.get_tokenizer()
_a : str = self.get_rust_tokenizer()
_a : Optional[Any] = '''I was born in 92000, and this is falsé.'''
_a : Optional[Any] = tokenizer.tokenize(_a )
_a : List[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
_a : Any = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Dict = self.get_rust_tokenizer()
_a : Optional[int] = tokenizer.encode(_a )
_a : Dict = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
| 14 | 1 |
import os
from datetime import datetime as dt
from github import Github
a__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_a : List[str] = Github(os.environ['''GITHUB_TOKEN'''] )
_a : Tuple = g.get_repo('''huggingface/diffusers''' )
_a : str = repo.get_issues(state='''open''' )
for issue in open_issues:
_a : Optional[int] = sorted(issue.get_comments() ,key=lambda __a : __a.created_at ,reverse=True )
_a : Union[str, Any] = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
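# Illustrative sketch (added, not part of the original script): the inactivity
# windows applied above — an issue must be at least 30 days old and more than
# 23 days quiet before the stale warning, then it is closed 7 days after the
# bot's last comment.
from datetime import datetime

_created = datetime(2023, 1, 1)
_updated = datetime(2023, 1, 5)
_now = datetime(2023, 2, 10)
assert (_now - _created).days >= 30 and (_now - _updated).days > 23  # -> mark stale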
| 14 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> int:
_a : Dict = '''ZinengTang/tvlt-base'''
_a : List[str] = tempfile.mkdtemp()
def __lowercase ( self , **_a ) -> int:
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self , **_a ) -> List[Any]:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Dict:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_feature_extractor()
_a : Optional[int] = TvltProcessor(image_processor=_a , feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Any = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _a )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Optional[Any] = self.get_image_processor()
_a : Dict = self.get_feature_extractor()
_a : Dict = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : Union[str, Any] = np.ones([1_2_0_0_0] )
_a : Dict = feature_extractor(_a , return_tensors='''np''' )
_a : Tuple = processor(audio=_a , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> int:
_a : Optional[Any] = self.get_image_processor()
_a : Union[str, Any] = self.get_feature_extractor()
_a : Optional[Any] = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : List[Any] = np.ones([3, 2_2_4, 2_2_4] )
_a : int = image_processor(_a , return_tensors='''np''' )
_a : Optional[int] = processor(images=_a , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> Union[str, Any]:
_a : int = self.get_image_processor()
_a : Union[str, Any] = self.get_feature_extractor()
_a : Any = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : List[str] = np.ones([1_2_0_0_0] )
_a : Optional[int] = np.ones([3, 2_2_4, 2_2_4] )
_a : int = processor(audio=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __lowercase ( self ) -> Union[str, Any]:
_a : str = self.get_image_processor()
_a : Union[str, Any] = self.get_feature_extractor()
_a : Dict = TvltProcessor(image_processor=_a , feature_extractor=_a )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 14 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=2 , _a=True , _a=False , _a=1_0 , _a=3 , _a=3_2 * 4 , _a=3_2 * 6 , _a=4 , _a=3_2 , ) -> Union[str, Any]:
_a : int = parent
_a : List[str] = batch_size
_a : Optional[int] = is_training
_a : Tuple = use_auxiliary_loss
_a : Tuple = num_queries
_a : List[str] = num_channels
_a : Tuple = min_size
_a : Union[str, Any] = max_size
_a : Optional[Any] = num_labels
_a : int = mask_feature_size
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
_a : Any = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_a )
_a : List[Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_a ) > 0.5
).float()
_a : int = (torch.rand((self.batch_size, self.num_labels) , device=_a ) > 0.5).long()
_a : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase ( self ) -> List[str]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def __lowercase ( self ) -> List[Any]:
_a , _a , _a , _a , _a : Optional[int] = self.prepare_config_and_inputs()
_a : str = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def __lowercase ( self , _a , _a ) -> Optional[Any]:
_a : Dict = output.encoder_hidden_states
_a : Tuple = output.pixel_decoder_hidden_states
_a : Any = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , config.decoder_config.decoder_layers )
def __lowercase ( self , _a , _a , _a , _a=False ) -> Tuple:
with torch.no_grad():
_a : Tuple = MaskFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(pixel_values=_a , pixel_mask=_a )
_a : int = model(_a , output_hidden_states=_a )
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a , _a )
def __lowercase ( self , _a , _a , _a , _a , _a ) -> Tuple:
_a : Tuple = MaskFormerForInstanceSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : Optional[Any] = model(pixel_values=_a , pixel_mask=_a )
_a : str = model(_a )
comm_check_on_output(_a )
_a : Tuple = model(
pixel_values=_a , pixel_mask=_a , mask_labels=_a , class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCAmelCase__ : Dict = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = False
def __lowercase ( self ) -> str:
_a : str = MaskFormerModelTester(self )
_a : Any = ConfigTester(self , config_class=_a , has_text_modality=_a )
def __lowercase ( self ) -> str:
self.config_tester.run_common_tests()
def __lowercase ( self ) -> str:
_a , _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_a , **_a , output_hidden_states=_a )
def __lowercase ( self ) -> str:
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def __lowercase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def __lowercase ( self ) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __lowercase ( self ) -> Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Tuple:
pass
def __lowercase ( self ) -> str:
_a , _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_a : Any = MaskFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self ) -> List[str]:
_a : Any = (self.model_tester.min_size,) * 2
_a : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_a ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=_a ),
'''class_labels''': torch.zeros(2 , 1_0 , device=_a ).long(),
}
_a : Any = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_a )
_a : List[Any] = model(**_a )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self ) -> Any:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_a , **_a , output_hidden_states=_a )
def __lowercase ( self ) -> List[str]:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Dict = model_class(_a ).to(_a )
_a : str = model(**_a , output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_a : Tuple = self.all_model_classes[1]
_a , _a , _a , _a , _a : int = self.model_tester.prepare_config_and_inputs()
_a : Dict = model_class(_a )
model.to(_a )
model.train()
_a : Optional[int] = model(_a , mask_labels=_a , class_labels=_a ).loss
loss.backward()
def __lowercase ( self ) -> Dict:
# only MaskFormerForInstanceSegmentation has the loss
_a : List[str] = self.all_model_classes[1]
_a , _a , _a , _a , _a : Tuple = self.model_tester.prepare_config_and_inputs()
_a : Union[str, Any] = True
_a : Tuple = True
_a : Optional[Any] = model_class(_a )
model.to(_a )
model.train()
_a : Optional[int] = model(_a , mask_labels=_a , class_labels=_a )
_a : Any = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : Optional[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_a : Union[str, Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a__ = 1E-4
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def __lowercase ( self ) -> Any:
_a : Dict = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_a )
_a : Optional[int] = self.default_image_processor
_a : List[Any] = prepare_img()
_a : int = image_processor(_a , return_tensors='''pt''' ).to(_a )
_a : List[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_a , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_a : List[str] = model(**_a )
_a : Union[str, Any] = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
_a : Optional[Any] = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
_a : Tuple = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _a , atol=_a ) )
def __lowercase ( self ) -> Optional[Any]:
_a : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_a )
.eval()
)
_a : Optional[int] = self.default_image_processor
_a : Optional[int] = prepare_img()
_a : Union[str, Any] = image_processor(_a , return_tensors='''pt''' ).to(_a )
_a : Tuple = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_a , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_a : Any = model(**_a )
# masks_queries_logits
_a : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_a : Optional[int] = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
_a : Tuple = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
_a : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a : Dict = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
def __lowercase ( self ) -> str:
_a : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(_a )
.eval()
)
_a : Dict = self.default_image_processor
_a : str = prepare_img()
_a : Union[str, Any] = image_processor(_a , return_tensors='''pt''' ).to(_a )
_a : Union[str, Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(_a , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
_a : int = model(**_a )
# masks_queries_logits
_a : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_a : Union[str, Any] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_a : str = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
_a : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a : Union[str, Any] = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
def __lowercase ( self ) -> Union[str, Any]:
_a : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(_a )
.eval()
)
_a : Optional[Any] = self.default_image_processor
_a : str = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='''pt''' , )
_a : List[str] = inputs['''pixel_values'''].to(_a )
_a : Dict = [el.to(_a ) for el in inputs['''mask_labels''']]
_a : List[str] = [el.to(_a ) for el in inputs['''class_labels''']]
with torch.no_grad():
_a : Tuple = model(**_a )
self.assertTrue(outputs.loss is not None )
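# Illustrative sketch (added): how the 1 x 3 x 800 x 1088 shape asserted above
# arises from the 640 x 480 COCO fixture, assuming DETR-style shortest-edge-800
# resizing followed by padding each side up to a multiple of 32.
import math

_h, _w = 480, 640
_scale = 800 / min(_h, _w)                          # resize the shortest edge to 800
_rh, _rw = round(_h * _scale), round(_w * _scale)   # -> 800 x 1067
_ph, _pw = math.ceil(_rh / 32) * 32, math.ceil(_rw / 32) * 32
assert (_ph, _pw) == (800, 1088)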
| 14 |
def __UpperCAmelCase ( __a : str ) -> list:
"""simple docstring"""
if __a == "":
return []
_a : list = []
for temp in range(int(__a ) ):
_a.append(F"""1/{temp + 1}""" if _a else '''1''' )
return _a
if __name__ == "__main__":
a__ = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
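# Illustrative cross-check (added): assuming the builder above yields
# ['1', '1/2', ..., '1/n'], the same terms summed exactly with fractions.
from fractions import Fraction

def _harmonic_partial_sum(n: int) -> Fraction:
    # exact partial sum 1 + 1/2 + ... + 1/n
    return sum((Fraction(1, k) for k in range(1, n + 1)), Fraction(0))

assert _harmonic_partial_sum(4) == Fraction(25, 12)  # 1 + 1/2 + 1/3 + 1/4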
| 14 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a__ = logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : int = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , **_a , ) -> None:
super().__init__(**_a )
_a : Tuple = size if size is not None else {'''shortest_edge''': 3_8_4}
_a : Optional[Any] = get_size_dict(_a , default_to_square=_a )
_a : int = do_resize
_a : Union[str, Any] = size
# Default value set here for backwards compatibility where the value in config is None
_a : str = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
_a : List[Any] = resample
_a : Optional[Any] = do_rescale
_a : str = rescale_factor
_a : str = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowercase ( self , _a , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray:
_a : List[str] = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
_a : str = size['''shortest_edge''']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_a : Dict = int(shortest_edge / crop_pct )
_a : Optional[Any] = get_resize_output_image_size(_a , size=_a , default_to_square=_a )
_a : Dict = resize(image=_a , size=_a , resample=_a , data_format=_a , **_a )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_a , size=(shortest_edge, shortest_edge) , data_format=_a , **_a )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_a , size=(shortest_edge, shortest_edge) , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> str:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : List[Any] = do_resize if do_resize is not None else self.do_resize
_a : str = crop_pct if crop_pct is not None else self.crop_pct
_a : int = resample if resample is not None else self.resample
_a : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
_a : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
_a : List[str] = image_mean if image_mean is not None else self.image_mean
_a : List[str] = image_std if image_std is not None else self.image_std
_a : Optional[Any] = size if size is not None else self.size
_a : Optional[int] = get_size_dict(_a , default_to_square=_a )
_a : Any = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_a : Union[str, Any] = [to_numpy_array(_a ) for image in images]
if do_resize:
_a : Optional[int] = [self.resize(image=_a , size=_a , crop_pct=_a , resample=_a ) for image in images]
if do_rescale:
_a : int = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : Optional[int] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : int = {'''pixel_values''': images}
return BatchFeature(data=_a , tensor_type=_a )
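# Illustrative sketch (added): the shortest_edge / crop_pct arithmetic used in
# the resize step above, with the default crop_pct of 224 / 256 = 0.875.
def _resize_then_crop(shortest_edge: int, crop_pct: float = 224 / 256) -> tuple:
    # below 384: resize the shortest edge to shortest_edge / crop_pct, then
    # center-crop back to shortest_edge; at 384 or larger: warp directly
    if shortest_edge < 384:
        return int(shortest_edge / crop_pct), shortest_edge
    return shortest_edge, shortest_edge

assert _resize_then_crop(224) == (256, 224)  # resize to 256, crop to 224
assert _resize_then_crop(384) == (384, 384)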
| 14 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[Any] ,__a : Optional[int] ) -> Dict:
"""simple docstring"""
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : int ,__a : List[str]="attention" ) -> List[str]:
"""simple docstring"""
_a : str = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
_a : Tuple = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_a : Any = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
_a : Dict = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_a : Union[str, Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
_a : Any = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_a : Tuple = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
_a : int = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Union[str, Any] ,__a : List[Any] ,__a : Any=False ) -> Any:
"""simple docstring"""
if split_mlp_wi:
_a : Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
_a : Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
_a : List[str] = (wi_a, wi_a)
else:
_a : List[str] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
_a : Optional[int] = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[Any] ,__a : Union[str, Any] ,__a : str ) -> List[str]:
"""simple docstring"""
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __UpperCAmelCase ( __a : dict ,*, __a : int ,__a : bool ,__a : bool = False ) -> Any:
"""simple docstring"""
_a : Dict = traverse_util.flatten_dict(variables['''target'''] )
_a : Any = {'''/'''.join(k ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_a : Optional[int] = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' ,__a )
_a : Tuple = collections.OrderedDict()
# Shared embeddings.
_a : Any = old['''token_embedder/embedding''']
# Encoder.
for i in range(__a ):
# Block i, layer 0 (Self Attention).
_a : Optional[Any] = tax_layer_norm_lookup(__a ,__a ,'''encoder''' ,'''pre_attention_layer_norm''' )
_a , _a , _a , _a : List[str] = tax_attention_lookup(__a ,__a ,'''encoder''' ,'''attention''' )
_a : List[str] = layer_norm
_a : Optional[Any] = k.T
_a : str = o.T
_a : List[Any] = q.T
_a : Tuple = v.T
# Block i, layer 1 (MLP).
_a : str = tax_layer_norm_lookup(__a ,__a ,'''encoder''' ,'''pre_mlp_layer_norm''' )
_a , _a : Any = tax_mlp_lookup(__a ,__a ,'''encoder''' ,__a )
_a : str = layer_norm
if split_mlp_wi:
_a : List[Any] = wi[0].T
_a : Any = wi[1].T
else:
_a : Any = wi.T
_a : Optional[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_a : Dict = tax_relpos_bias_lookup(
__a ,__a ,'''encoder''' ).T
_a : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
_a : List[Any] = tax_relpos_bias_lookup(
__a ,0 ,'''encoder''' ).T
_a : Optional[Any] = tax_relpos_bias_lookup(
__a ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(__a ):
# Block i, layer 0 (Self Attention).
_a : Union[str, Any] = tax_layer_norm_lookup(__a ,__a ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
_a , _a , _a , _a : Optional[Any] = tax_attention_lookup(__a ,__a ,'''decoder''' ,'''self_attention''' )
_a : Optional[Any] = layer_norm
_a : Dict = k.T
_a : str = o.T
_a : str = q.T
_a : List[str] = v.T
# Block i, layer 1 (Cross Attention).
_a : Any = tax_layer_norm_lookup(__a ,__a ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
_a , _a , _a , _a : str = tax_attention_lookup(__a ,__a ,'''decoder''' ,'''encoder_decoder_attention''' )
_a : Optional[Any] = layer_norm
_a : Optional[int] = k.T
_a : Dict = o.T
_a : str = q.T
_a : int = v.T
# Block i, layer 2 (MLP).
_a : Optional[int] = tax_layer_norm_lookup(__a ,__a ,'''decoder''' ,'''pre_mlp_layer_norm''' )
_a , _a : Tuple = tax_mlp_lookup(__a ,__a ,'''decoder''' ,__a )
_a : Optional[Any] = layer_norm
if split_mlp_wi:
_a : List[str] = wi[0].T
_a : List[Any] = wi[1].T
else:
_a : Dict = wi.T
_a : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_a : Tuple = tax_relpos_bias_lookup(__a ,__a ,'''decoder''' ).T
_a : Tuple = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_a : Any = old['''decoder/logits_dense/kernel'''].T
return new
def __UpperCAmelCase ( __a : Dict ,__a : bool ) -> Tuple:
"""simple docstring"""
_a : Tuple = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_a : Any = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_a : Optional[int] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_a : str = state_dict['''shared.weight''']
return state_dict
def __UpperCAmelCase ( __a : List[str] ,__a : Union[str, Any] ,__a : Dict ,__a : Union[str, Any] ,__a : List[Any] ) -> int:
"""simple docstring"""
_a : List[str] = checkpoints.load_tax_checkpoint(__a )
_a : str = convert_tax_to_pytorch(
__a ,num_layers=config.num_layers ,is_encoder_only=__a ,scalable_attention=__a )
_a : str = make_state_dict(__a ,__a )
model.load_state_dict(__a ,strict=__a )
def __UpperCAmelCase ( __a : List[Any] ,__a : Any ,__a : Union[str, Any] ,__a : bool = False ,__a : bool = False ,) -> Optional[Any]:
"""simple docstring"""
_a : List[str] = MTaConfig.from_json_file(__a )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_a : Any = UMTaEncoderModel(__a )
else:
_a : Tuple = UMTaForConditionalGeneration(__a )
# Load weights from the T5X checkpoint
load_tax_weights_in_ta(__a ,__a ,__a ,__a ,__a )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__a )
# Verify that we can load the checkpoint.
model.from_pretrained(__a )
print('''Done''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is an encoder-only model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
a__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
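# Illustrative sketch (added): the head-merging reshape performed by the
# attention lookup above, shown on a dummy kernel with assumed T5X shape
# (d_model, num_heads, d_head).
_k_tmp = np.zeros((512, 8, 64))
_k = _k_tmp.reshape(_k_tmp.shape[0], _k_tmp.shape[1] * _k_tmp.shape[2])
assert _k.shape == (512, 512)  # 8 heads of width 64 folded into one projection matrix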
| 14 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = "microsoft/speecht5_tts"
UpperCAmelCase__ : int = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
UpperCAmelCase__ : List[Any] = "text_reader"
UpperCAmelCase__ : Optional[Any] = SpeechTaProcessor
UpperCAmelCase__ : Union[str, Any] = SpeechTaForTextToSpeech
UpperCAmelCase__ : Union[str, Any] = SpeechTaHifiGan
UpperCAmelCase__ : Dict = ["text"]
UpperCAmelCase__ : str = ["audio"]
def __lowercase ( self ) -> Tuple:
if self.post_processor is None:
_a : str = '''microsoft/speecht5_hifigan'''
super().setup()
def __lowercase ( self , _a , _a=None ) -> Optional[Any]:
_a : Union[str, Any] = self.pre_processor(text=_a , return_tensors='''pt''' , truncation=_a )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
_a : Dict = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
_a : List[str] = torch.tensor(embeddings_dataset[7_3_0_5]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __lowercase ( self , _a ) -> List[str]:
with torch.no_grad():
return self.model.generate_speech(**_a )
def __lowercase ( self , _a ) -> Tuple:
with torch.no_grad():
return self.post_processor(_a ).cpu().detach()
| 14 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
a__ = '''Usage of script: script_name <size_of_canvas:int>'''
a__ = [0] * 100 + [1] * 10
random.shuffle(choice)
def __UpperCAmelCase ( __a : int ) -> list[list[bool]]:
"""simple docstring"""
_a : int = [[False for i in range(__a )] for j in range(__a )]
return canvas
def __UpperCAmelCase ( __a : list[list[bool]] ) -> None:
"""simple docstring"""
for i, row in enumerate(__a ):
for j, _ in enumerate(__a ):
_a : Optional[int] = bool(random.getrandbits(1 ) )
def __UpperCAmelCase ( __a : list[list[bool]] ) -> list[list[bool]]:
"""simple docstring"""
_a : Any = np.array(__a )
_a : Optional[int] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__a ):
for c, pt in enumerate(__a ):
_a : Tuple = __judge_point(
__a ,current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
_a : List[str] = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
_a : list[list[bool]] = current_canvas.tolist()
return return_canvas
def __UpperCAmelCase ( __a : bool ,__a : list[list[bool]] ) -> bool:
"""simple docstring"""
_a : Optional[Any] = 0
_a : str = 0
# count the dead and alive neighbours.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# the focus point itself was counted above; remove it from the tally.
if pt:
alive -= 1
else:
dead -= 1
# apply the rules of the game.
_a : Optional[int] = pt
if pt:
if alive < 2:
_a : Dict = False
elif alive == 2 or alive == 3:
_a : Optional[Any] = True
elif alive > 3:
_a : str = False
else:
if alive == 3:
_a : int = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
a__ = int(sys.argv[1])
# main working structure of this module.
a__ = create_canvas(canvas_size)
seed(c)
a__ , a__ = plt.subplots()
fig.show()
a__ = ListedColormap(['''w''', '''k'''])
try:
while True:
a__ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
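# Illustrative sketch (added): a compact, self-contained one-step update that
# applies the same B3/S23 rules as __judge_point above to a plain square
# boolean grid, treating cells beyond the border as dead.
def _life_step(grid: list) -> list:
    n = len(grid)
    out = [[False] * n for _ in range(n)]
    for r in range(n):
        for c in range(n):
            alive = sum(
                grid[r + dr][c + dc]
                for dr in (-1, 0, 1)
                for dc in (-1, 0, 1)
                if (dr or dc) and 0 <= r + dr < n and 0 <= c + dc < n
            )
            # birth with exactly 3 neighbours, survival with 2 or 3
            out[r][c] = alive == 3 or (grid[r][c] and alive == 2)
    return out

# a blinker flips between a horizontal and a vertical line of three cells
_blinker = [[False] * 5 for _ in range(5)]
for _c in (1, 2, 3):
    _blinker[2][_c] = True
assert [row[2] for row in _life_step(_blinker)][1:4] == [True, True, True]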
| 14 | 1 |
from collections import deque
from math import floor
from random import random
from time import time
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> List[str]:
_a : Any = {}
def __lowercase ( self , _a , _a , _a=1 ) -> Tuple:
if self.graph.get(_a ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_a : Union[str, Any] = [[w, v]]
if not self.graph.get(_a ):
_a : Dict = []
def __lowercase ( self ) -> Union[str, Any]:
return list(self.graph )
def __lowercase ( self , _a , _a ) -> Optional[Any]:
if self.graph.get(_a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_a )
def __lowercase ( self , _a=-2 , _a=-1 ) -> Dict:
if s == d:
return []
_a : Optional[int] = []
_a : int = []
if s == -2:
_a : Optional[int] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_a : Union[str, Any] = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a : Union[str, Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_a ) != 0:
_a : Any = stack[len(_a ) - 1]
else:
_a : Dict = ss
# check if we have reached the starting point
if len(_a ) == 0:
return visited
def __lowercase ( self , _a=-1 ) -> List[str]:
if c == -1:
_a : List[str] = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_a : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(_a , _a , 1 )
def __lowercase ( self , _a=-2 ) -> List[Any]:
_a : Any = deque()
_a : List[str] = []
if s == -2:
_a : Any = list(self.graph )[0]
d.append(_a )
visited.append(_a )
while d:
_a : Optional[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __lowercase ( self , _a ) -> Optional[Any]:
_a : Optional[int] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __lowercase ( self , _a ) -> int:
return len(self.graph[u] )
def __lowercase ( self , _a=-2 ) -> int:
_a : str = []
_a : str = []
if s == -2:
_a : Any = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_a : Dict = s
_a : Union[str, Any] = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a : int = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_a ) != 0:
_a : List[Any] = stack[len(_a ) - 1]
else:
_a : Dict = ss
# check if we have reached the starting point
if len(_a ) == 0:
return sorted_nodes
def __lowercase ( self ) -> Any:
_a : List[str] = []
_a : Optional[Any] = []
_a : Optional[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_a : Any = -2
_a : Optional[int] = []
_a : str = s
_a : List[Any] = False
_a : Optional[int] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a : int = len(_a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a : Union[str, Any] = True
if len(_a ) != 0:
_a : Any = stack[len(_a ) - 1]
else:
_a : Optional[int] = False
indirect_parents.append(_a )
_a : Dict = s
_a : Union[str, Any] = ss
# check if we have reached the starting point
if len(_a ) == 0:
return list(_a )
def __lowercase ( self ) -> Any:
_a : Any = []
_a : Union[str, Any] = []
_a : List[str] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_a : List[str] = -2
_a : Dict = []
_a : str = s
_a : Optional[Any] = False
_a : str = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a : int = len(_a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a : Optional[Any] = True
if len(_a ) != 0:
_a : Tuple = stack[len(_a ) - 1]
else:
_a : Union[str, Any] = False
indirect_parents.append(_a )
_a : Union[str, Any] = s
_a : List[Any] = ss
# check if we have reached the starting point
if len(_a ) == 0:
return False
def __lowercase ( self , _a=-2 , _a=-1 ) -> List[str]:
_a : Dict = time()
self.dfs(_a , _a )
_a : Tuple = time()
return end - begin
def __lowercase ( self , _a=-2 ) -> Optional[int]:
_a : int = time()
self.bfs(_a )
_a : List[Any] = time()
return end - begin
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> Optional[Any]:
_a : Union[str, Any] = {}
def __lowercase ( self , _a , _a , _a=1 ) -> List[Any]:
# check if u exists
if self.graph.get(_a ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_a : int = [[w, v]]
# add the other way
if self.graph.get(_a ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
_a : int = [[w, u]]
def __lowercase ( self , _a , _a ) -> List[Any]:
if self.graph.get(_a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_a )
# the other way round
if self.graph.get(_a ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_a )
def __lowercase ( self , _a=-2 , _a=-1 ) -> Any:
if s == d:
return []
_a : str = []
_a : Optional[Any] = []
if s == -2:
_a : List[str] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_a : List[Any] = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_a ) != 0:
_a : Dict = stack[len(_a ) - 1]
else:
_a : Union[str, Any] = ss
# check if we have reached the starting point
if len(_a ) == 0:
return visited
def __lowercase ( self , _a=-1 ) -> int:
if c == -1:
_a : Any = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_a : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(_a , _a , 1 )
def __lowercase ( self , _a=-2 ) -> Tuple:
_a : Optional[Any] = deque()
_a : List[Any] = []
if s == -2:
_a : int = list(self.graph )[0]
d.append(_a )
visited.append(_a )
while d:
_a : List[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __lowercase ( self , _a ) -> Optional[Any]:
return len(self.graph[u] )
def __lowercase ( self ) -> List[Any]:
_a : List[str] = []
_a : Optional[Any] = []
_a : str = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_a : Any = -2
_a : int = []
_a : Optional[Any] = s
_a : str = False
_a : Optional[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a : Optional[int] = len(_a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a : str = True
if len(_a ) != 0:
_a : List[Any] = stack[len(_a ) - 1]
else:
_a : List[Any] = False
indirect_parents.append(_a )
_a : Tuple = s
_a : Tuple = ss
# check if we have reached the starting point
if len(_a ) == 0:
return list(_a )
def __lowercase ( self ) -> List[str]:
_a : str = []
_a : Optional[int] = []
_a : List[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_a : Tuple = -2
_a : List[Any] = []
_a : Dict = s
_a : List[str] = False
_a : Optional[Any] = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_a : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a : Union[str, Any] = len(_a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a : Union[str, Any] = True
if len(_a ) != 0:
_a : Optional[Any] = stack[len(_a ) - 1]
else:
_a : List[str] = False
indirect_parents.append(_a )
_a : Dict = s
_a : str = ss
# check if we have reached the starting point
if len(_a ) == 0:
return False
def __lowercase ( self ) -> Union[str, Any]:
return list(self.graph )
def __lowercase ( self , _a=-2 , _a=-1 ) -> Union[str, Any]:
_a : Optional[Any] = time()
self.dfs(_a , _a )
_a : str = time()
return end - begin
def __lowercase ( self , _a=-2 ) -> Dict:
_a : Union[str, Any] = time()
self.bfs(_a )
_a : Optional[int] = time()
return end - begin
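# Illustrative sketch (added): the same stack-based DFS idea as the traversal
# methods above, written against a plain adjacency dict instead of the
# weighted [[w, v], ...] edge lists.
def _iterative_dfs(adj: dict, start: int) -> list:
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            # push neighbours in reverse so the first-listed one is popped first
            stack.extend(reversed(adj.get(node, [])))
    return visited

assert _iterative_dfs({1: [2, 3], 2: [4], 3: [], 4: []}, 1) == [1, 2, 4, 3]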
| 14 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "funnel"
UpperCAmelCase__ : Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self , _a=3_0_5_2_2 , _a=[4, 4, 4] , _a=None , _a=2 , _a=7_6_8 , _a=1_2 , _a=6_4 , _a=3_0_7_2 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1e-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ) -> List[Any]:
_a : Optional[int] = vocab_size
_a : Dict = block_sizes
_a : Optional[int] = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_a : int = num_decoder_layers
_a : List[str] = d_model
_a : Optional[Any] = n_head
_a : Tuple = d_head
_a : Dict = d_inner
_a : List[str] = hidden_act
_a : int = hidden_dropout
_a : Union[str, Any] = attention_dropout
_a : Tuple = activation_dropout
_a : Optional[Any] = initializer_range
_a : Dict = initializer_std
_a : Union[str, Any] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
_a : Any = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
_a : Optional[Any] = attention_type
_a : int = separate_cls
_a : Tuple = truncate_seq
_a : List[Any] = pool_q_only
super().__init__(**_a )
@property
def __lowercase ( self ) -> Tuple:
return sum(self.block_sizes )
@num_hidden_layers.setter
def __lowercase ( self , _a ) -> List[str]:
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def __lowercase ( self ) -> Optional[int]:
return len(self.block_sizes )
@num_blocks.setter
def __lowercase ( self , _a ) -> Dict:
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 14 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
a__ = logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , *_a , **_a ) -> None:
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 14 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : int = "mobilenet_v1"
def __init__( self , _a=3 , _a=2_2_4 , _a=1.0 , _a=8 , _a="relu6" , _a=True , _a=0.999 , _a=0.02 , _a=0.001 , **_a , ) -> List[Any]:
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_a : Tuple = num_channels
_a : str = image_size
_a : Tuple = depth_multiplier
_a : Any = min_depth
_a : int = hidden_act
_a : Optional[Any] = tf_padding
_a : str = classifier_dropout_prob
_a : Optional[int] = initializer_range
_a : Any = layer_norm_eps
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : str = version.parse("1.11" )
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def __lowercase ( self ) -> float:
return 1e-4
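# Illustrative sketch (added; an assumption, not shown in the config above):
# the usual MobileNet rule for applying depth_multiplier to a channel count,
# floored by min_depth and rounded to a multiple of 8.
def _scaled_channels(channels: int, depth_multiplier: float, min_depth: int = 8, divisor: int = 8) -> int:
    return max(min_depth, int(channels * depth_multiplier + divisor / 2) // divisor * divisor)

assert _scaled_channels(64, 0.75) == 48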
| 14 | 1 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
a__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
a__ = []
a__ = []
a__ = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
a__ = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': f'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
'''emoji''': True,
},
}
]
a__ = 0
for log in Path().glob('''*.log'''):
a__ = 0
with open(log, '''r''') as f:
for line in f:
a__ = json.loads(line)
if line.get('''nodeid''', '''''') != "":
a__ = line['''nodeid''']
if line.get('''duration''', None) is not None:
a__ = f'''{line["duration"]:.4f}'''
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
a__ = []
log.unlink()
a__ = ''''''
a__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
a__ = []
a__ = {}
for test in failed_tests:
a__ = test[0].split('''::''')
a__ = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
a__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
a__ = [test[0] for test in failed_table]
a__ = list(set(files))
# Count number of instances in failed_tests
a__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
a__ = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
a__ = '''Too many failed tests, please see the full report in the Action results.'''
a__ = len(err) + 10
a__ = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
a__ = '''No failed tests! 🤗'''
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
a__ = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
a__ = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
a__ = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': f'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
a__ = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': f'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
a__ = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
a__ = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
a__ = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
a__ = row[0]
else:
a__ = ''''''
a__ = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 14 |
a__ = '''Input must be a string of 8 digits plus a letter'''
a__ = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def __UpperCAmelCase ( __a : str ) -> bool:
"""simple docstring"""
if not isinstance(__a ,__a ):
_a : List[str] = F"""Expected string as input, found {type(__a ).__name__}"""
raise TypeError(__a )
_a : List[Any] = spanish_id.replace('''-''' ,'''''' ).upper()
if len(__a ) != 9:
raise ValueError(__a )
try:
_a : Any = int(spanish_id_clean[0:8] )
_a : str = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__a ) from ex
if letter.isdigit():
raise ValueError(__a )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 1 |
def __UpperCAmelCase ( __a : List[Any] ) -> Any:
"""simple docstring"""
_a : Tuple = 0
_a : List[str] = len(__a )
for i in range(n - 1 ):
for j in range(i + 1 ,__a ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def __UpperCAmelCase ( __a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if len(__a ) <= 1:
return arr, 0
_a : List[Any] = len(__a ) // 2
_a : List[str] = arr[0:mid]
_a : Optional[int] = arr[mid:]
_a , _a : str = count_inversions_recursive(__a )
_a , _a : Tuple = count_inversions_recursive(__a )
_a , _a : int = _count_cross_inversions(__a ,__a )
_a : Union[str, Any] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def __UpperCAmelCase ( __a : List[Any] ,__a : Any ) -> Union[str, Any]:
"""simple docstring"""
_a : str = []
_a : Optional[int] = 0
while i < len(__a ) and j < len(__a ):
if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for every i <= k < len(p),
            # because p is sorted; each of those pairs is a cross inversion
num_inversion += len(__a ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__a ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def __UpperCAmelCase ( ) -> Any:
"""simple docstring"""
_a : Tuple = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_a : Dict = count_inversions_bf(__a )
_a , _a : Dict = count_inversions_recursive(__a )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' ,__a )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_a : Optional[int] = count_inversions_bf(__a )
_a , _a : Union[str, Any] = count_inversions_recursive(__a )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' ,__a )
# an empty list should also have zero inversions
_a : Optional[int] = []
_a : List[Any] = count_inversions_bf(__a )
_a , _a : str = count_inversions_recursive(__a )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' ,__a )
if __name__ == "__main__":
main()
| 14 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __UpperCAmelCase ( __a : Optional[Any] ,__a : int ,__a : Any ) -> int:
"""simple docstring"""
_a : int = 0
if start < end:
_a : Tuple = randint(__a ,__a )
_a : Tuple = a[end]
_a : List[str] = a[pivot]
_a : Any = temp
_a , _a : Optional[int] = _in_place_partition(__a ,__a ,__a )
count += _in_place_quick_sort(__a ,__a ,p - 1 )
count += _in_place_quick_sort(__a ,p + 1 ,__a )
return count
def __UpperCAmelCase ( __a : List[Any] ,__a : Tuple ,__a : Dict ) -> Dict:
"""simple docstring"""
_a : Dict = 0
_a : Tuple = randint(__a ,__a )
_a : List[Any] = a[end]
_a : str = a[pivot]
_a : str = temp
_a : Dict = start - 1
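    # Lomuto-style scan: elements smaller than the pivot (parked at a[end])
    # are swapped to the front of the range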
for index in range(__a ,__a ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_a : int = new_pivot_index + 1
_a : Any = a[new_pivot_index]
_a : Optional[int] = a[index]
_a : str = temp
_a : Union[str, Any] = a[new_pivot_index + 1]
_a : Tuple = a[end]
_a : Any = temp
return new_pivot_index + 1, count
a__ = TemporaryFile()
a__ = 100 # 100 elements are to be sorted
a__ , a__ = 0, 1 # mean and standard deviation
a__ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
a__ = np.load(outfile)
a__ = len(M) - 1
a__ = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
    ''' is :'''
)
print(z)
| 14 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
a__ = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
a__ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def __UpperCAmelCase ( __a : Any ,__a : List[str]=False ) -> List[Any]:
"""simple docstring"""
_a , _a : List[Any] = create_model(
'''HTSAT-tiny''' ,'''roberta''' ,__a ,precision='''fp32''' ,device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' ,enable_fusion=__a ,fusion_type='''aff_2d''' if enable_fusion else None ,)
return model, model_cfg
def __UpperCAmelCase ( __a : str ) -> Union[str, Any]:
"""simple docstring"""
_a : Any = {}
_a : int = R'''.*sequential.(\d+).*'''
_a : List[Any] = R'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_a : List[Any] = key.replace(__a ,__a )
if re.match(__a ,__a ):
# replace sequential layers with list
_a : int = re.match(__a ,__a ).group(1 )
_a : List[str] = key.replace(F"""sequential.{sequential_layer}.""" ,F"""layers.{int(__a )//3}.linear.""" )
elif re.match(__a ,__a ):
_a : Optional[Any] = int(re.match(__a ,__a ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
            _a : Optional[int] = 1 if projection_layer == 0 else 2
            _a : List[str] = key.replace(F"""_projection.{projection_layer}.""" ,F"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
_a : List[str] = value
_a : Union[str, Any] = mixed_qkv.size(0 ) // 3
_a : Tuple = mixed_qkv[:qkv_dim]
_a : Dict = mixed_qkv[qkv_dim : qkv_dim * 2]
_a : Dict = mixed_qkv[qkv_dim * 2 :]
_a : Optional[int] = query_layer
_a : Tuple = key_layer
_a : Tuple = value_layer
else:
_a : Tuple = value
return model_state_dict
def __UpperCAmelCase ( __a : Tuple ,__a : Optional[int] ,__a : Dict ,__a : Dict=False ) -> Any:
"""simple docstring"""
_a , _a : Optional[Any] = init_clap(__a ,enable_fusion=__a )
clap_model.eval()
_a : Tuple = clap_model.state_dict()
_a : Union[str, Any] = rename_state_dict(__a )
_a : Union[str, Any] = ClapConfig()
_a : Dict = enable_fusion
_a : Dict = ClapModel(__a )
# ignore the spectrogram embedding layer
model.load_state_dict(__a ,strict=__a )
model.save_pretrained(__a )
transformers_config.save_pretrained(__a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
a__ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 14 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = MgpstrTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Any:
super().setUp()
# fmt: off
_a : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_a : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
def __lowercase ( self , **_a ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Tuple:
_a : List[str] = '''tester'''
_a : Optional[Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_a : Tuple = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_a : Tuple = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a , _a : int = self.get_input_output_texts(_a )
_a : List[str] = tokenizer.tokenize(_a )
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_a : int = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __lowercase ( self ) -> Optional[Any]:
pass
| 14 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = 1
_a : str = 3
_a : List[str] = (3_2, 3_2)
_a : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : Dict = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def __lowercase ( self ) -> List[str]:
torch.manual_seed(0 )
_a : Any = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __lowercase ( self ) -> int:
torch.manual_seed(0 )
_a : Optional[Any] = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(_a )
@property
def __lowercase ( self ) -> List[Any]:
def extract(*_a , **_a ):
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
_a : Union[str, Any] = torch.ones([0] )
def __lowercase ( self , _a ) -> Union[str, Any]:
self.pixel_values.to(_a )
return self
return Out()
return extract
def __lowercase ( self ) -> Union[str, Any]:
_a : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a : int = self.dummy_cond_unet
_a : List[Any] = PNDMScheduler(skip_prk_steps=_a )
_a : Optional[Any] = self.dummy_vae
_a : List[str] = self.dummy_text_encoder
_a : List[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_a : int = 7_7
_a : List[str] = self.dummy_image.to(_a )
_a : str = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_a : int = AltDiffusionImgaImgPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
_a : Optional[int] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_a )
_a : str = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
_a : Optional[int] = '''A painting of a squirrel eating a burger'''
_a : Dict = torch.Generator(device=_a ).manual_seed(0 )
_a : Optional[Any] = alt_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_a , )
_a : int = output.images
_a : List[Any] = torch.Generator(device=_a ).manual_seed(0 )
_a : Union[str, Any] = alt_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_a , return_dict=_a , )[0]
_a : int = image[0, -3:, -3:, -1]
_a : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_a : Optional[int] = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __lowercase ( self ) -> List[str]:
_a : int = self.dummy_cond_unet
_a : List[Any] = PNDMScheduler(skip_prk_steps=_a )
_a : List[str] = self.dummy_vae
_a : str = self.dummy_text_encoder
_a : List[str] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
_a : int = 7_7
_a : Optional[int] = self.dummy_image.to(_a )
# put models in fp16
_a : int = unet.half()
_a : int = vae.half()
_a : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
_a : List[str] = AltDiffusionImgaImgPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
_a : Any = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_a )
_a : Any = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
_a : List[str] = '''A painting of a squirrel eating a burger'''
_a : Optional[Any] = torch.manual_seed(0 )
_a : List[str] = alt_pipe(
[prompt] , generator=_a , num_inference_steps=2 , output_type='''np''' , image=_a , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __lowercase ( self ) -> str:
_a : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
_a : str = init_image.resize((7_6_0, 5_0_4) )
_a : Union[str, Any] = '''BAAI/AltDiffusion'''
_a : Any = AltDiffusionImgaImgPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_a : Any = '''A fantasy landscape, trending on artstation'''
_a : str = torch.manual_seed(0 )
_a : Any = pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , generator=_a , output_type='''np''' , )
_a : List[Any] = output.images[0]
_a : List[str] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
_a : Optional[Any] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_a : str = init_image.resize((7_6_8, 5_1_2) )
_a : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
_a : Dict = '''BAAI/AltDiffusion'''
_a : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_a : int = '''A fantasy landscape, trending on artstation'''
_a : str = torch.manual_seed(0 )
_a : Optional[Any] = pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , generator=_a , output_type='''np''' , )
_a : Any = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 14 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
_a : int = 0
def __lowercase ( self ) -> List[str]:
_a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : Optional[Any] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
_a : Any = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Dict = CLIPConfig()
            # Create a dummy config file with image_processor_type
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : List[str] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_a : Tuple = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_a : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Any:
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
_a : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowercase ( self ) -> List[Any]:
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : List[str] = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def __lowercase ( self ) -> Dict:
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_a : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowercase ( self ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_a : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
_a : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowercase ( self ) -> Dict:
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = Path(_a ) / '''preprocessor_config.json'''
_a : int = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : int = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowercase ( self ) -> Union[str, Any]:
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_a : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_a : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 14 | 1 |
from jiwer import compute_measures
import datasets
a__ = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
a__ = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
a__ = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def __lowercase ( self , _a=None , _a=None , _a=False ) -> Optional[int]:
if concatenate_texts:
return compute_measures(_a , _a )["wer"]
else:
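            # iterative mode: accumulate errors (substitutions + deletions + insertions)
            # and reference word counts (substitutions + deletions + hits) over all pairs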
_a : Optional[int] = 0
_a : Tuple = 0
for prediction, reference in zip(_a , _a ):
_a : str = compute_measures(_a , _a )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 14 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCAmelCase__ : float
UpperCAmelCase__ : TreeNode | None = None
UpperCAmelCase__ : TreeNode | None = None
def __UpperCAmelCase ( __a : TreeNode | None ) -> bool:
"""simple docstring"""
def is_valid_tree(__a : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(__a ,__a ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(__a ):
        raise ValueError(
            '''Each node should be of type TreeNode and its data should be a float.''' )
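    # BST check via bounds propagation: each node's value must lie strictly
    # between the bounds inherited from its ancestors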
def is_binary_search_tree_recursive_check(
__a : TreeNode | None ,__a : float ,__a : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left ,__a ,node.data )
and is_binary_search_tree_recursive_check(
node.right ,node.data ,__a )
)
return is_binary_search_tree_recursive_check(__a ,-float('''inf''' ) ,float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 1 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCAmelCase_ :
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = XGLMConfig
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : Optional[Any] = "gelu"
def __init__( self , _a , _a=1_4 , _a=7 , _a=True , _a=True , _a=True , _a=9_9 , _a=3_2 , _a=2 , _a=4 , _a=3_7 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=0.02 , ) -> Dict:
_a : List[str] = parent
_a : Optional[Any] = batch_size
_a : Dict = seq_length
_a : Tuple = is_training
_a : List[str] = use_input_mask
_a : Optional[Any] = use_labels
_a : Dict = vocab_size
_a : List[Any] = d_model
_a : Any = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : Tuple = ffn_dim
_a : Any = activation_function
_a : List[str] = activation_dropout
_a : Tuple = attention_dropout
_a : Union[str, Any] = max_position_embeddings
_a : int = initializer_range
_a : Union[str, Any] = None
_a : Any = 0
_a : Tuple = 2
_a : Union[str, Any] = 1
def __lowercase ( self ) -> Optional[int]:
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def __lowercase ( self ) -> Any:
_a : Dict = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_a : Any = None
if self.use_input_mask:
_a : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_a : Dict = self.get_config()
_a : Tuple = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __lowercase ( self ) -> int:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_a , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_a , )
def __lowercase ( self ) -> Optional[int]:
_a : List[str] = self.prepare_config_and_inputs()
        _a , _a , _a , _a : Dict = config_and_inputs
_a : List[str] = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCAmelCase__ : Optional[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCAmelCase__ : Tuple = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> str:
_a : str = TFXGLMModelTester(self )
_a : List[str] = ConfigTester(self , config_class=_a , n_embd=3_7 )
def __lowercase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@slow
def __lowercase ( self ) -> List[str]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Union[str, Any] = TFXGLMModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def __lowercase ( self ) -> str:
super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self , _a=True ) -> Dict:
_a : Any = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
_a : Any = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_a : Union[str, Any] = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
_a : Any = model.generate(_a , do_sample=_a , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , _a )
@slow
def __lowercase ( self ) -> List[str]:
_a : List[Any] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
_a : Optional[int] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
_a : Optional[Any] = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
_a : Optional[int] = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
_a : List[str] = model.generate(_a , do_sample=_a , seed=[7, 0] )
_a : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=_a )
_a : int = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(_a , _a )
@slow
def __lowercase ( self ) -> Union[str, Any]:
_a : int = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
_a : Union[str, Any] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
_a : Tuple = '''left'''
# use different length sentences to test batching
_a : Optional[int] = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
_a : Optional[int] = tokenizer(_a , return_tensors='''tf''' , padding=_a )
_a : int = inputs['''input_ids''']
_a : Optional[Any] = model.generate(input_ids=_a , attention_mask=inputs['''attention_mask'''] , max_new_tokens=1_2 )
_a : Optional[int] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
_a : Optional[Any] = model.generate(input_ids=_a , max_new_tokens=1_2 )
_a : Dict = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
_a : int = model.generate(input_ids=_a , max_new_tokens=1_2 )
_a : Union[str, Any] = tokenizer.batch_decode(_a , skip_special_tokens=_a )
_a : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_a )
_a : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=_a )
_a : Any = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , [non_padded_sentence, padded_sentence] )
| 14 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
a__ = numpy.array([0, 0])
a__ = numpy.array([0.5, 0.8660254])
a__ = numpy.array([1, 0])
a__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def __UpperCAmelCase ( __a : list[numpy.ndarray] ,__a : int ) -> list[numpy.ndarray]:
"""simple docstring"""
_a : Tuple = initial_vectors
for _ in range(__a ):
_a : int = iteration_step(__a )
return vectors
def __UpperCAmelCase ( __a : list[numpy.ndarray] ) -> list[numpy.ndarray]:
"""simple docstring"""
_a : Tuple = []
for i, start_vector in enumerate(vectors[:-1] ):
_a : str = vectors[i + 1]
new_vectors.append(__a )
_a : Optional[int] = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 ,60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def __UpperCAmelCase ( __a : numpy.ndarray ,__a : float ) -> numpy.ndarray:
"""simple docstring"""
_a : Tuple = numpy.radians(__a )
_a , _a : List[Any] = numpy.cos(__a ), numpy.sin(__a )
_a : Dict = numpy.array(((c, -s), (s, c)) )
return numpy.dot(__a ,__a )
def __UpperCAmelCase ( __a : list[numpy.ndarray] ) -> None:
"""simple docstring"""
_a : str = plt.gca()
axes.set_aspect('''equal''' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_a , _a : Optional[int] = zip(*__a )
plt.plot(__a ,__a )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 14 | 1 |
import random
def __UpperCAmelCase ( __a : int ) -> bool:
"""simple docstring"""
_a : str = num - 1
_a : Dict = 0
while s % 2 == 0:
_a : Dict = s // 2
t += 1
for _ in range(5 ):
_a : Dict = random.randrange(2 ,num - 1 )
_a : List[str] = pow(__a ,__a ,__a )
if v != 1:
_a : Any = 0
while v != (num - 1):
if i == t - 1:
return False
else:
_a : List[Any] = i + 1
_a : List[Any] = (v**2) % num
return True
def __UpperCAmelCase ( __a : int ) -> bool:
"""simple docstring"""
if num < 2:
return False
_a : str = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(__a )
def __UpperCAmelCase ( __a : int = 1_024 ) -> int:
"""simple docstring"""
while True:
_a : List[Any] = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(__a ):
return num
if __name__ == "__main__":
a__ = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 14 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase ( __a : Tuple ,__a : Dict ,__a : List[str] ,__a : Optional[Any] ,__a : Tuple ) -> Dict:
"""simple docstring"""
with open(__a ) as metadata_file:
_a : Optional[Any] = json.load(__a )
_a : List[Any] = LukeConfig(use_entity_aware_attention=__a ,**metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_a : Optional[Any] = torch.load(__a ,map_location='''cpu''' )['''module''']
# Load the entity vocab file
_a : Any = load_original_entity_vocab(__a )
# add an entry for [MASK2]
_a : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_a : Dict = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_a : Optional[int] = AddedToken('''<ent>''' ,lstrip=__a ,rstrip=__a )
_a : Tuple = AddedToken('''<ent2>''' ,lstrip=__a ,rstrip=__a )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__a )
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''r''' ) as f:
_a : List[str] = json.load(__a )
_a : Tuple = '''MLukeTokenizer'''
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''w''' ) as f:
json.dump(__a ,__a )
with open(os.path.join(__a ,MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) ,'''w''' ) as f:
json.dump(__a ,__a )
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
# Initialize the embeddings of the special tokens
_a : str = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_a : Tuple = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_a : Any = state_dict['''embeddings.word_embeddings.weight''']
_a : Optional[int] = word_emb[ent_init_index].unsqueeze(0 )
_a : Any = word_emb[enta_init_index].unsqueeze(0 )
_a : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_a : Tuple = state_dict[bias_name]
_a : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_a : Optional[int] = decoder_bias[enta_init_index].unsqueeze(0 )
_a : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_a : Tuple = F"""encoder.layer.{layer_index}.attention.self."""
_a : List[Any] = state_dict[prefix + matrix_name]
_a : Dict = state_dict[prefix + matrix_name]
_a : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_a : Union[str, Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
_a : Optional[int] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_a : int = state_dict['''entity_predictions.bias''']
_a : int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_a : Optional[int] = LukeForMaskedLM(config=__a ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_a : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_a : Optional[int] = state_dict[key]
else:
_a : Tuple = state_dict[key]
_a , _a : int = model.load_state_dict(__a ,strict=__a )
if set(__a ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a ,task='''entity_classification''' )
_a : int = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_a : List[Any] = (0, 9)
_a : Tuple = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : int = model(**__a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : List[str] = torch.Size((1, 33, 768) )
_a : Union[str, Any] = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : str = torch.Size((1, 1, 768) )
_a : List[Any] = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
_a : Dict = '''Tokyo is the capital of <mask>.'''
_a : List[str] = (24, 30)
_a : Optional[int] = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : Optional[Any] = model(**__a )
_a : Any = encoding['''input_ids'''][0].tolist()
_a : Optional[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_a : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__a )
_a : Any = outputs.entity_logits[0][0].argmax().item()
_a : Optional[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__a ) )
model.save_pretrained(__a )
def __UpperCAmelCase ( __a : List[Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
    _a : int = [json.loads(line ) for line in open(__a )]
_a : List[Any] = {}
for entry in data:
_a : int = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_a : List[Any] = entity_id
break
_a : Dict = F"""{language}:{entity_name}"""
_a : int = entity_id
return new_mapping
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
a__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 14 | 1 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = FunnelTokenizer
UpperCAmelCase__ : List[str] = FunnelTokenizerFast
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Union[str, Any] = True
def __lowercase ( self ) -> Tuple:
super().setUp()
_a : str = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowercase ( self , **_a ) -> Union[str, Any]:
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> Dict:
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Any:
_a : Dict = '''UNwant\u00E9d,running'''
_a : Tuple = '''unwanted, running'''
return input_text, output_text
def __lowercase ( self ) -> int:
_a : Any = self.tokenizer_class(self.vocab_file )
_a : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 1_0, 8, 9] )
def __lowercase ( self ) -> Tuple:
_a : int = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
_a : Any = tokenizer('''UNwant\u00E9d,running''' )
_a : Optional[int] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
_a : Tuple = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 14 |
from scipy.stats import spearmanr
import datasets
a__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
a__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
a__ = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
def __lowercase ( self , _a , _a , _a=False ) -> str:
_a : int = spearmanr(_a , _a )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 14 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
a__ = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def __UpperCAmelCase ( __a : List[Any]=True ) -> str:
"""simple docstring"""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__lowercase ) )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : str = None
UpperCAmelCase__ : List[str] = None
def __lowercase ( self , _a , _a ) -> Optional[Any]:
with TemporaryDirectory() as tmp_dir:
_a : Dict = dataset_module_factory(_a , cache_dir=_a )
_a : Union[str, Any] = import_main_class(dataset_module.module_path , dataset=_a )
_a : DatasetBuilder = builder_cls(
cache_dir=_a , config_name=_a , hash=dataset_module.hash , )
_a : int = '''/'''.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=_a ).replace(os.sep , '''/''' ),
config.DATASET_INFO_FILENAME,
] )
_a : Optional[int] = cached_path(_a , cache_dir=_a )
self.assertTrue(os.path.exists(_a ) )
@pytest.mark.integration
def __UpperCAmelCase ( __a : Optional[int] ) -> List[Any]:
"""simple docstring"""
_a : List[str] = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
_a : List[str] = dataset_module_factory('''wikipedia''' ,cache_dir=__a )
_a : Tuple = import_main_class(dataset_module.module_path )
_a : DatasetBuilder = builder_cls(
cache_dir=__a ,config_name='''20220301.frr''' ,hash=dataset_module.hash ,)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_a : int = None
builder_instance.download_and_prepare()
_a : Tuple = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __UpperCAmelCase ( __a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_a : Optional[int] = dataset_module_factory('''wikipedia''' ,cache_dir=__a )
_a : Optional[int] = import_main_class(dataset_module.module_path ,dataset=__a )
_a : DatasetBuilder = builder_cls(
cache_dir=__a ,config_name='''20220301.frr''' ,hash=dataset_module.hash ,)
_a : Dict = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(__a ,__a )
assert "train" in ds
assert isinstance(ds['''train'''] ,__a )
assert next(iter(ds['''train'''] ) )
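# Hedged usage sketch of the streaming path exercised above (dataset and config
# names are illustrative, taken from the test parameters):
# from datasets import load_dataset
# ds = load_dataset("wikipedia", "20220301.frr", streaming=True)
# next(iter(ds["train"]))  # yields the first example without a full download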
| 14 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __UpperCAmelCase ( __a : bytes ,__a : int ) -> np.array:
"""simple docstring"""
_a : int = F"""{sampling_rate}"""
_a : str = '''1'''
_a : Optional[int] = '''f32le'''
_a : Optional[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(__a ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
_a : Any = ffmpeg_process.communicate(__a )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
_a : Optional[Any] = output_stream[0]
_a : Optional[int] = np.frombuffer(__a ,np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
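# Minimal, hedged illustration of the decode step above, assuming the
# obfuscated `np.floataa` stands for `np.float32`: ffmpeg writes raw
# little-endian f32 PCM to stdout, and `np.frombuffer` reinterprets the bytes.
_f32le_demo = np.frombuffer(b"\x00\x00\x80?\x00\x00\x00@", dtype=np.float32)
assert _f32le_demo.tolist() == [1.0, 2.0]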
def __UpperCAmelCase ( __a : int ,__a : float ,__a : str = "f32le" ,) -> str:
"""simple docstring"""
_a : Dict = F"""{sampling_rate}"""
_a : Optional[Any] = '''1'''
if format_for_conversion == "s16le":
_a : Dict = 2
elif format_for_conversion == "f32le":
_a : Optional[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Dict = platform.system()
if system == "Linux":
_a : Dict = '''alsa'''
_a : Union[str, Any] = '''default'''
elif system == "Darwin":
_a : Union[str, Any] = '''avfoundation'''
_a : List[str] = ''':0'''
elif system == "Windows":
_a : Optional[int] = '''dshow'''
_a : str = '''default'''
_a : Tuple = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : str = _ffmpeg_stream(__a ,__a )
for item in iterator:
yield item
def __UpperCAmelCase ( __a : int ,__a : float ,__a : Optional[int] = None ,__a : Optional[Union[Tuple[float, float], float]] = None ,__a : str = "f32le" ,) -> Optional[int]:
"""simple docstring"""
if stream_chunk_s is not None:
_a : Tuple = stream_chunk_s
else:
_a : Tuple = chunk_length_s
_a : Tuple = ffmpeg_microphone(__a ,__a ,format_for_conversion=__a )
if format_for_conversion == "s16le":
_a : Any = np.intaa
_a : Optional[int] = 2
elif format_for_conversion == "f32le":
_a : Dict = np.floataa
_a : List[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_a : List[Any] = chunk_length_s / 6
_a : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__a ,(int, float) ):
_a : Optional[Any] = [stride_length_s, stride_length_s]
_a : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_a : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_a : Optional[Any] = datetime.datetime.now()
_a : Tuple = datetime.timedelta(seconds=__a )
for item in chunk_bytes_iter(__a ,__a ,stride=(stride_left, stride_right) ,stream=__a ):
# Put everything back in numpy scale
_a : Dict = np.frombuffer(item['''raw'''] ,dtype=__a )
_a : Dict = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
_a : str = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
            # We're running behind real time; drop this chunk to catch up
continue
yield item
def __UpperCAmelCase ( __a : Optional[int] ,__a : int ,__a : Tuple[int, int] ,__a : bool = False ) -> Optional[int]:
"""simple docstring"""
_a : Any = b''''''
_a , _a : List[str] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_a : List[str] = 0
for raw in iterator:
acc += raw
if stream and len(__a ) < chunk_len:
_a : Dict = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__a ) >= chunk_len:
# We are flushing the accumulator
_a : List[str] = (_stride_left, stride_right)
_a : List[Any] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
_a : List[Any] = False
yield item
_a : Optional[Any] = stride_left
_a : Optional[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__a ) > stride_left:
_a : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
_a : Dict = False
yield item
def __UpperCAmelCase ( __a : int ,__a : int ) -> Tuple:
"""simple docstring"""
    _a : Dict = 2**24  # 16MB
try:
with subprocess.Popen(__a ,stdout=subprocess.PIPE ,bufsize=__a ) as ffmpeg_process:
while True:
_a : int = ffmpeg_process.stdout.read(__a )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
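# Hedged, de-obfuscated sketch of the stride logic in `chunk_bytes_iter` above
# (names are illustrative): fixed-size windows are emitted so that consecutive
# chunks overlap by `stride_left + stride_right` bytes, letting a downstream
# model discard unreliable chunk borders.
def _chunk_with_stride(raw: bytes, chunk_len: int, stride_left: int, stride_right: int):
    acc, left = raw, 0  # the very first chunk has no left context
    while len(acc) >= chunk_len:
        yield {"raw": acc[:chunk_len], "stride": (left, stride_right)}
        left = stride_left
        acc = acc[chunk_len - stride_left - stride_right :]
    if len(acc) > left:  # trailing partial chunk
        yield {"raw": acc, "stride": (left, 0)}

# e.g. list(_chunk_with_stride(bytes(range(10)), 4, 1, 1)) produces windows that
# each share two boundary bytes with their neighbours.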
| 14 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["image_processor", "tokenizer"]
UpperCAmelCase__ : List[Any] = "CLIPImageProcessor"
UpperCAmelCase__ : List[str] = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__( self , _a=None , _a=None , **_a ) -> Tuple:
_a : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
_a : int = kwargs.pop('''feature_extractor''' )
_a : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a=None , _a=None , _a=None , **_a ) -> List[str]:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_a : Optional[Any] = self.tokenizer(_a , return_tensors=_a , **_a )
if images is not None:
_a : Optional[int] = self.image_processor(_a , return_tensors=_a , **_a )
if text is not None and images is not None:
_a : List[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_a ) , tensor_type=_a )
def __lowercase ( self , *_a , **_a ) -> Any:
return self.tokenizer.batch_decode(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> List[Any]:
return self.tokenizer.decode(*_a , **_a )
@property
def __lowercase ( self ) -> Tuple:
_a : int = self.tokenizer.model_input_names
_a : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 14 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = KandinskyInpaintPipeline
UpperCAmelCase__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase__ : Optional[int] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Any = False
@property
def __lowercase ( self ) -> Optional[int]:
return 3_2
@property
def __lowercase ( self ) -> int:
return 3_2
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def __lowercase ( self ) -> Optional[Any]:
return 1_0_0
@property
def __lowercase ( self ) -> Optional[Any]:
_a : Any = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a : Optional[int] = MultilingualCLIP(_a )
_a : Tuple = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[str] = {
'''in_channels''': 9,
            # out_channels is double in_channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_a : Dict = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self ) -> Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ) -> Any:
_a : List[Any] = self.dummy_text_encoder
_a : Optional[Any] = self.dummy_tokenizer
_a : Optional[Any] = self.dummy_unet
_a : Union[str, Any] = self.dummy_movq
_a : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
_a : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowercase ( self , _a , _a=0 ) -> int:
_a : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_a : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_a : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_a ) ).to(_a )
_a : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a : Optional[int] = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
_a : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.floataa )
_a : List[str] = 0
if str(_a ).startswith('''mps''' ):
_a : Tuple = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
_a : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[Any] = '''cpu'''
_a : List[Any] = self.get_dummy_components()
_a : Tuple = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Any = pipe(**self.get_dummy_inputs(_a ) )
_a : str = output.images
_a : Tuple = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
_a : str = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowercase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
_a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_a : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
_a : Any = 0
_a : Optional[Any] = '''a hat'''
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_a : Tuple = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
_a : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_a , _a : Dict = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_a : Optional[int] = pipeline(
_a , image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
_a : Optional[int] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a , _a )
| 14 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : int = (UnCLIPScheduler,)
def __lowercase ( self , **_a ) -> Optional[Any]:
_a : Tuple = {
'''num_train_timesteps''': 1_0_0_0,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_a )
return config
def __lowercase ( self ) -> List[str]:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_a )
def __lowercase ( self ) -> Union[str, Any]:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_a )
def __lowercase ( self ) -> Optional[int]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __lowercase ( self ) -> List[str]:
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=_a )
def __lowercase ( self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_a )
def __lowercase ( self ) -> Optional[Any]:
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_a , prev_timestep=_a )
def __lowercase ( self ) -> Tuple:
_a : Optional[int] = self.scheduler_classes[0]
_a : str = self.get_scheduler_config(variance_type='''fixed_small_log''' )
_a : List[str] = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.999_4987 ) ) < 1e-5
def __lowercase ( self ) -> Optional[Any]:
_a : List[str] = self.scheduler_classes[0]
_a : List[str] = self.get_scheduler_config(variance_type='''learned_range''' )
_a : Tuple = scheduler_class(**_a )
_a : List[str] = 0.5
assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(4_8_7 , predicted_variance=_a ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(9_9_9 , predicted_variance=_a ) - -0.001_0011 < 1e-5
def __lowercase ( self ) -> Optional[Any]:
_a : Union[str, Any] = self.scheduler_classes[0]
_a : int = self.get_scheduler_config()
_a : List[Any] = scheduler_class(**_a )
_a : List[Any] = scheduler.timesteps
_a : Tuple = self.dummy_model()
_a : List[str] = self.dummy_sample_deter
_a : Union[str, Any] = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
_a : Union[str, Any] = model(_a , _a )
# 2. predict previous mean of sample x_t-1
_a : Union[str, Any] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
_a : Tuple = pred_prev_sample
_a : int = torch.sum(torch.abs(_a ) )
_a : Dict = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def __lowercase ( self ) -> Union[str, Any]:
_a : str = self.scheduler_classes[0]
_a : Any = self.get_scheduler_config()
_a : Tuple = scheduler_class(**_a )
scheduler.set_timesteps(2_5 )
_a : Optional[int] = scheduler.timesteps
_a : Dict = self.dummy_model()
_a : Tuple = self.dummy_sample_deter
_a : Dict = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
_a : Dict = model(_a , _a )
if i + 1 == timesteps.shape[0]:
_a : Tuple = None
else:
_a : Union[str, Any] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_a : str = scheduler.step(
_a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
_a : Tuple = pred_prev_sample
_a : Any = torch.sum(torch.abs(_a ) )
_a : str = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> List[Any]:
pass
| 14 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
a__ = parser.parse_args()
a__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a__ = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , **_a ) -> List[str]:
super().__init__(**_a )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(_a )
def __call__( self , _a , _a = None , **_a , ) -> List[str]:
if "text_queries" in kwargs:
_a : Optional[Any] = kwargs.pop('''text_queries''' )
if isinstance(_a , (str, Image.Image) ):
_a : Tuple = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
_a : str = image
_a : int = super().__call__(_a , **_a )
return results
def __lowercase ( self , **_a ) -> Union[str, Any]:
_a : Union[str, Any] = {}
if "threshold" in kwargs:
_a : Dict = kwargs['''threshold''']
if "top_k" in kwargs:
_a : Tuple = kwargs['''top_k''']
return {}, {}, postprocess_params
def __lowercase ( self , _a ) -> List[str]:
_a : int = load_image(inputs['''image'''] )
_a : Union[str, Any] = inputs['''candidate_labels''']
if isinstance(_a , _a ):
_a : Tuple = candidate_labels.split(''',''' )
_a : Dict = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(_a ):
_a : Union[str, Any] = self.tokenizer(_a , return_tensors=self.framework )
_a : Optional[Any] = self.image_processor(_a , return_tensors=self.framework )
yield {
"is_last": i == len(_a ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __lowercase ( self , _a ) -> str:
_a : Optional[int] = model_inputs.pop('''target_size''' )
_a : int = model_inputs.pop('''candidate_label''' )
_a : str = model_inputs.pop('''is_last''' )
_a : Union[str, Any] = self.model(**_a )
_a : List[str] = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def __lowercase ( self , _a , _a=0.1 , _a=None ) -> Any:
_a : str = []
for model_output in model_outputs:
_a : str = model_output['''candidate_label''']
_a : int = BaseModelOutput(_a )
_a : Tuple = self.image_processor.post_process_object_detection(
outputs=_a , threshold=_a , target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
_a : Optional[Any] = outputs['''scores'''][index].item()
_a : List[str] = self._get_bounding_box(outputs['''boxes'''][index][0] )
_a : Dict = {'''score''': score, '''label''': label, '''box''': box}
results.append(_a )
_a : str = sorted(_a , key=lambda _a : x["score"] , reverse=_a )
if top_k:
_a : Any = results[:top_k]
return results
def __lowercase ( self , _a ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
_a , _a , _a , _a : Dict = box.int().tolist()
_a : int = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
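# Hedged usage sketch (the checkpoint name is illustrative, not taken from
# this file):
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector("http://images.cocodataset.org/val2017/000000039769.jpg",
#          candidate_labels=["cat", "remote control"])
# -> a list of {"score", "label", "box"} dicts sorted by descending score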
| 14 |
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a , _a ) -> List[str]:
_a : List[Any] = name
_a : List[str] = value
_a : List[str] = weight
def __repr__( self ) -> Optional[int]:
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def __lowercase ( self ) -> List[Any]:
return self.value
def __lowercase ( self ) -> int:
return self.name
def __lowercase ( self ) -> Optional[int]:
return self.weight
def __lowercase ( self ) -> Optional[Any]:
return self.value / self.weight
def __UpperCAmelCase ( __a : Optional[int] ,__a : Tuple ,__a : List[str] ) -> List[str]:
"""simple docstring"""
_a : Optional[int] = []
for i in range(len(__a ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __UpperCAmelCase ( __a : int ,__a : Union[str, Any] ,__a : int ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = sorted(__a ,key=__a ,reverse=__a )
_a : Any = []
_a , _a : Optional[int] = 0.0, 0.0
for i in range(len(__a ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
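# Hedged, de-obfuscated sketch of the greedy knapsack routine above: sort items
# by value/weight ratio and take each whole item while it still fits. Names and
# data are illustrative.
def _greedy_knapsack(items, max_cost):
    chosen, total_cost, total_value = [], 0.0, 0.0
    for item in sorted(items, key=lambda it: it["value"] / it["weight"], reverse=True):
        if total_cost + item["weight"] <= max_cost:
            chosen.append(item["name"])
            total_cost += item["weight"]
            total_value += item["value"]
    return chosen, total_value

assert _greedy_knapsack(
    [
        {"name": "a", "value": 60, "weight": 10},
        {"name": "b", "value": 100, "weight": 20},
        {"name": "c", "value": 120, "weight": 30},
    ],
    50,
) == (["a", "b"], 160.0)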
| 14 | 1 |
def __UpperCAmelCase ( __a : int = 1_000 ) -> int:
"""simple docstring"""
_a : str = 2**power
_a : str = 0
while n:
_a , _a : Optional[Any] = r + n % 10, n // 10
return r
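# Hedged sanity check of the digit-sum loop above, written standalone so it
# does not depend on the obfuscated function name: 2**15 = 32768 and
# 3+2+7+6+8 = 26.
def _digit_sum(n: int) -> int:
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r

assert _digit_sum(2**15) == 26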
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 14 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3 , _a=True , _a=True , _a=0.1 , _a=0.1 , _a=2_2_4 , _a=1_0_0_0 , _a=[3, 3, 6, 4] , _a=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Tuple:
_a : Dict = parent
_a : Optional[int] = batch_size
_a : Optional[Any] = num_channels
_a : Union[str, Any] = is_training
_a : Tuple = use_labels
_a : Dict = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Dict = num_labels
_a : List[str] = image_size
_a : Dict = layer_depths
_a : str = embed_dims
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
if self.use_labels:
_a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : Dict = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> int:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_a , layer_scale_init_value=1e-5 , )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : List[Any] = SwiftFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __lowercase ( self , _a , _a , _a ) -> Optional[Any]:
_a : List[str] = self.num_labels
_a : Optional[int] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_a : Union[str, Any] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Tuple:
((_a) , (_a) , (_a)) : Optional[int] = self.prepare_config_and_inputs()
_a : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[int] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = SwiftFormerModelTester(self )
_a : int = ConfigTester(
self , config_class=_a , has_text_modality=_a , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def __lowercase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
def __lowercase ( self ) -> Dict:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __lowercase ( self ) -> str:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> int:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = SwiftFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> int:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[Any] = outputs.hidden_states
_a : Union[str, Any] = 8
self.assertEqual(len(_a ) , _a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> str:
def _config_zero_init(_a ):
_a : List[Any] = copy.deepcopy(_a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_a , _a , 1e-1_0 )
if isinstance(getattr(_a , _a , _a ) , _a ):
_a : int = _config_zero_init(getattr(_a , _a ) )
setattr(_a , _a , _a )
return configs_no_init
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Dict:
_a : Any = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_a )
_a : Any = self.default_image_processor
_a : Any = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[Any] = model(**_a )
# verify the logits
_a : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : int = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 14 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = "nllb-moe"
UpperCAmelCase__ : Optional[int] = ["past_key_values"]
UpperCAmelCase__ : List[str] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _a=1_2_8_1_1_2 , _a=1_0_2_4 , _a=1_2 , _a=4_0_9_6 , _a=1_6 , _a=1_2 , _a=4_0_9_6 , _a=1_6 , _a=0.05 , _a=0.05 , _a=True , _a=True , _a="relu" , _a=1_0_2_4 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.02 , _a=2 , _a=True , _a=False , _a="float32" , _a=False , _a=1_2_8 , _a=6_4 , _a=4 , _a=4 , _a=0.001 , _a=0.001 , _a="all" , _a=False , _a=False , _a=1.0 , _a=0.2 , _a=1 , _a=0 , _a=2 , _a=False , **_a , ) -> str:
_a : Optional[Any] = vocab_size
_a : Tuple = max_position_embeddings
_a : Any = d_model
_a : List[Any] = encoder_ffn_dim
_a : List[Any] = encoder_layers
_a : Tuple = encoder_attention_heads
_a : Any = decoder_ffn_dim
_a : List[Any] = decoder_layers
_a : Optional[Any] = decoder_attention_heads
_a : Tuple = dropout
_a : Dict = attention_dropout
_a : Union[str, Any] = activation_dropout
_a : List[Any] = activation_function
_a : Dict = init_std
_a : List[Any] = encoder_layerdrop
_a : List[Any] = decoder_layerdrop
_a : Optional[int] = use_cache
_a : Optional[Any] = encoder_layers
_a : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
_a : int = router_z_loss_coef
_a : List[str] = router_aux_loss_coef
_a : int = decoder_sparse_step
_a : List[Any] = encoder_sparse_step
_a : Tuple = num_experts
_a : str = expert_capacity
_a : str = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
_a : int = router_dtype
_a : Optional[Any] = router_ignore_padding_tokens
_a : Tuple = batch_prioritized_routing
_a : Optional[int] = second_expert_policy
_a : Optional[Any] = normalize_router_prob_before_dropping
_a : Optional[int] = moe_eval_capacity_token_fraction
_a : Optional[Any] = moe_token_dropout
_a : Union[str, Any] = output_router_logits
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , decoder_start_token_id=_a , **_a , )
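# Hedged usage sketch, assuming the class above corresponds to `NllbMoeConfig`
# (hyper-parameter values are illustrative, not defaults):
# tiny = NllbMoeConfig(d_model=64, encoder_layers=2, decoder_layers=2,
#                      encoder_ffn_dim=128, decoder_ffn_dim=128, num_experts=4)
# tiny.num_attention_heads  # resolved to `encoder_attention_heads` via attribute_map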
| 14 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : str ) -> List[Any]:
"""simple docstring"""
_a : Tuple = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
_a : Dict = MaskFormerConfig(backbone_config=__a )
_a : Optional[Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
_a : Optional[Any] = 847
_a : List[Any] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
_a : Union[str, Any] = 150
_a : Any = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
_a : int = 171
_a : List[str] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
_a : Dict = 133
_a : Optional[Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
_a : List[Any] = 19
_a : Optional[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
_a : List[Any] = 65
_a : Dict = '''mapillary-vistas-id2label.json'''
_a : Optional[int] = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
_a : Tuple = {int(__a ): v for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : str = dct.pop(__a )
_a : str = val
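# Hedged illustration of the rename helper above (the dict is synthetic; the
# keys come from the list built by `create_rename_keys`):
# sd = {"backbone.patch_embed.norm.bias": 0}
# rename_key(sd, "backbone.patch_embed.norm.bias",
#            "model.pixel_level_module.encoder.model.embeddings.norm.bias")
# -> the value now lives under the new key and the old key is gone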
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_a : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_a : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[int] = in_proj_weight[:dim, :]
_a : List[Any] = in_proj_bias[: dim]
_a : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
_a : Tuple = in_proj_bias[
dim : dim * 2
]
_a : int = in_proj_weight[
-dim :, :
]
_a : Optional[int] = in_proj_bias[-dim :]
# fmt: on
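# Self-contained, hedged illustration of the q/k/v split above: a fused
# (3*dim, dim) projection is sliced row-wise into query, key and value blocks
# (tensor values are synthetic).
_dim = 4
_fused = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _fused[:_dim, :], _fused[_dim : 2 * _dim, :], _fused[-_dim:, :]
assert torch.equal(torch.cat([_q, _k, _v]), _fused)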
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Union[str, Any] = in_proj_weight[: hidden_size, :]
_a : List[Any] = in_proj_bias[:config.hidden_size]
_a : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Any = in_proj_bias[hidden_size : hidden_size * 2]
_a : Tuple = in_proj_weight[-hidden_size :, :]
_a : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_a : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[Any] = in_proj_weight[: hidden_size, :]
_a : Any = in_proj_bias[:config.hidden_size]
_a : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_a : List[str] = in_proj_weight[-hidden_size :, :]
_a : int = in_proj_bias[-hidden_size :]
# fmt: on
def __UpperCAmelCase ( ) -> torch.Tensor:
"""simple docstring"""
_a : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : Dict = Image.open(requests.get(__a ,stream=__a ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __a : str ,__a : str ,__a : str ,__a : bool = False ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[Any] = get_maskformer_config(__a )
# load original state_dict
with open(__a ,'''rb''' ) as f:
_a : str = pickle.load(__a )
_a : Union[str, Any] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_a : Any = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a ,__a ,__a )
read_in_swin_q_k_v(__a ,config.backbone_config )
read_in_decoder_q_k_v(__a ,__a )
# update to torch tensors
for key, value in state_dict.items():
_a : Optional[int] = torch.from_numpy(__a )
# load 🤗 model
_a : Dict = MaskFormerForInstanceSegmentation(__a )
model.eval()
for name, param in model.named_parameters():
print(__a ,param.shape )
_a , _a : Tuple = model.load_state_dict(__a ,strict=__a )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__a ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_a : Union[str, Any] = prepare_img()
if "vistas" in model_name:
_a : int = 65
elif "cityscapes" in model_name:
_a : Tuple = 65_535
else:
_a : str = 255
_a : Dict = True if '''ade''' in model_name else False
_a : Optional[Any] = MaskFormerImageProcessor(ignore_index=__a ,reduce_labels=__a )
_a : Optional[Any] = image_processor(__a ,return_tensors='''pt''' )
_a : int = model(**__a )
print('''Logits:''' ,outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,expected_logits ,atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
        help='''Path to the original pickled state dict (.pkl file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 14 | 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available\(\)''')
# Matches from xxx import bla
_re_single_line_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
DUMMY_CONSTANT = '''
{0} = None
'''
DUMMY_CLASS = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
'''
DUMMY_FUNCTION = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
def find_backend( line ):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
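# e.g. find_backend("    if not is_torch_available():") -> "torch"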
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS ,'''__init__.py''' ) ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith('''else:''' ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object( name ,backend_name ):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name ,backend_name )
    else:
        return DUMMY_CLASS.format(name ,backend_name )
def create_dummy_files( backend_specific_objects=None ):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '''[''' + ''', '''.join(F"""\"{b}\"""" for b in backend.split('''_and_''' ) ) + ''']'''
        dummy_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o ,backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies( overwrite=False ):
    """Check if the dummy files are up to date and possibly `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'''torch''': '''pt'''}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS ,'''utils''' )
    dummy_file_paths = {
        backend: os.path.join(path ,F"""dummy_{short_names.get(backend ,backend )}_objects.py""" )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''''''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F"""Updating diffusers.utils.dummy_{short_names.get(backend ,backend )}_objects.py as the main """
                    '''__init__ has new objects.''' )
                with open(dummy_file_paths[backend] ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    '''The main __init__ has objects that are not present in '''
                    F"""diffusers.utils.dummy_{short_names.get(backend ,backend )}_objects.py. Run `make fix-copies` """
                    '''to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 14 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = XLMProphetNetTokenizer
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[Any] = True
    def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self ) -> Any:
        token = '''[PAD]'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __lowercase ( self ) -> str:
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_a ) , 1_0_1_2 )
def __lowercase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def __lowercase ( self ) -> str:
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
_a : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
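        # the ids below are the raw SentencePiece piece ids shifted by the tokenizer's fairseq offset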
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_a : List[Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
_a : List[str] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
    def big_tokenizer( self ):
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def __lowercase ( self ) -> Tuple:
        text = '''Hello World!'''
        expected_ids = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
        self.assertListEqual(expected_ids , self.big_tokenizer.encode(text ) )
@slow
def __lowercase ( self ) -> str:
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 14 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '''\
Text data.
Second line of data.'''
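# shared payload written by the fixtures and checked by the tests below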
FILE_PATH = '''file'''
@pytest.fixture(scope='''session''' )
def zstd_path( tmp_path_factory ):
    path = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT ,'''utf-8''' )
    with zstd.open(path ,'''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir ,FILE_PATH ) ,'''w''' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('''compression_format''' ,['''gzip''', '''xz''', '''zstd'''] )
def test_cached_path_extract( compression_format ,gz_file ,xz_file ,zstd_path ,tmp_path ,text_file ):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / '''cache'''
    download_config = DownloadConfig(cache_dir=cache_dir ,extract_compressed_file=True )
    extracted_path = cached_path(input_path ,download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' ,[True, False] )
@pytest.mark.parametrize('''default_cache_dir''' ,[True, False] )
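# 2x2 grid: default vs. custom extraction layout, default vs. custom cache directory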
def test_extracted_datasets_path( default_extracted ,default_cache_dir ,xz_file ,tmp_path ,monkeypatch ):
    custom_cache_dir = '''custom_cache'''
    custom_extracted_dir = '''custom_extracted_dir'''
    custom_extracted_path = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        expected = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' ,custom_extracted_dir )
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' ,str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir ,extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename ,download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local( text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local( tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / '''__missing_file__.txt''' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = '''./__missing_file__.txt'''
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec( tmpfs_file ):
    output_file = get_from_cache(F"""tmp://{tmpfs_file}""" )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,True )
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,True )
def test_http_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('''https://huggingface.co''' ,temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,True )
def test_ftp_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('''ftp://huggingface.co''' ,temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,True )
def test_fsspec_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('''s3://huggingface.co''' ,temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('''s3://huggingface.co''' )
| 14 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = LxmertTokenizer
UpperCAmelCase__ : Optional[Any] = LxmertTokenizerFast
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Dict = True
    def setUp( self ):
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
def __lowercase ( self ) -> List[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 1_0, 8, 9] )
def __lowercase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
| 14 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = "xlm-roberta-xl"
    def __init__( self , vocab_size=2_5_0_8_8_0 , hidden_size=2_5_6_0 , num_hidden_layers=3_6 , num_attention_heads=3_2 , intermediate_size=1_0_2_4_0 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_4 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-0_5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
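        # ONNX export needs symbolic axes; multiple-choice inputs carry an extra "choice" axis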
if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 14 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.checkpoint = '''ZinengTang/tvlt-base'''
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self , **kwargs ):
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Dict:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
def __lowercase ( self ) -> Any:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2_0_0_0] )
        audio_dict = feature_extractor(audio , return_tensors='''np''' )
        input_processor = processor(audio=audio , return_tensors='''np''' )
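        # the processor must defer audio inputs to the feature extractor, so the sums should agree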
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> int:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        image = np.ones([3, 2_2_4, 2_2_4] )
        image_dict = image_processor(image , return_tensors='''np''' )
        input_processor = processor(images=image , return_tensors='''np''' )
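        # likewise, image inputs should pass through to the image processor unchanged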
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> Union[str, Any]:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2_0_0_0] )
        image = np.ones([3, 2_2_4, 2_2_4] )
        inputs = processor(audio=audio , images=image )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
        with pytest.raises(ValueError ):
processor()
def __lowercase ( self ) -> Union[str, Any]:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 14 | 1 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
"""simple docstring"""
    def __init__( self , parent , vocab_size=9_9 , batch_size=1_3 , d_model=1_6 , decoder_seq_length=7 , is_training=True , is_decoder=True , use_attention_mask=True , use_cache=False , use_labels=True , decoder_start_token_id=2 , decoder_ffn_dim=3_2 , decoder_layers=4 , decoder_attention_heads=4 , max_position_embeddings=3_0 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past( self , config , input_ids , attention_mask , lm_labels , ):
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        past_key_values = outputs['''past_key_values''']
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask , lm_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ : List[str] = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : List[Any] = False
    def setUp( self ):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
def __lowercase ( self ) -> Optional[Any]:
pass
def __lowercase ( self ) -> Optional[Any]:
pass
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __lowercase ( self ) -> Union[str, Any]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_a )
def __lowercase ( self ) -> List[str]:
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def __lowercase ( self ) -> str:
pass
| 14 |
def harmonic_series( n_term: str ) -> list:
    """Generate the harmonic series up to the n-th term, as a list of strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term ) ):
        series.append(F"""1/{temp + 1}""" if series else '''1''' )
    return series
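# e.g. harmonic_series("3") -> ['1', '1/2', '1/3']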
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
    print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
    print(harmonic_series(nth_term))
| 14 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
    task: str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"
@property
def __lowercase ( self ) -> Dict[str, str]:
return {self.text_column: "text"}
| 14 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params ,i ,prefix ):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def tax_attention_lookup( params ,i ,prefix ,layer_name="attention" ):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def tax_mlp_lookup( params ,i ,prefix ,split_mlp_wi=False ):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
        wi_1 = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
    wo = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
    return wi, wo
def tax_layer_norm_lookup( params ,i ,prefix ,layer_name ):
    """Returns the layer norm param of a layer."""
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def convert_tax_to_pytorch( variables ,*, num_layers ,is_encoder_only ,scalable_attention=False ):
    """Converts the parameters from a T5X-Flax checkpoint to a PyTorch-style ordered dict."""
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
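    # after flattening, parameter names are "/"-joined paths into the Flax parameter tree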
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' ,split_mlp_wi )
    new = collections.OrderedDict()
# Shared embeddings.
_a : Any = old['''token_embedder/embedding''']
# Encoder.
    for i in range(num_layers ):
# Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old ,i ,'''encoder''' ,'''pre_attention_layer_norm''' )
        k , o , q , v = tax_attention_lookup(old ,i ,'''encoder''' ,'''attention''' )
_a : List[str] = layer_norm
_a : Optional[Any] = k.T
_a : str = o.T
_a : List[Any] = q.T
_a : Tuple = v.T
# Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old ,i ,'''encoder''' ,'''pre_mlp_layer_norm''' )
        wi , wo = tax_mlp_lookup(old ,i ,'''encoder''' ,split_mlp_wi )
_a : str = layer_norm
if split_mlp_wi:
_a : List[Any] = wi[0].T
_a : Any = wi[1].T
else:
_a : Any = wi.T
_a : Optional[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
            _a : Dict = tax_relpos_bias_lookup(old ,i ,'''encoder''' ).T
_a : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
        _a : List[Any] = tax_relpos_bias_lookup(old ,0 ,'''encoder''' ).T
        _a : Optional[Any] = tax_relpos_bias_lookup(old ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
        for i in range(num_layers ):
# Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
            k , o , q , v = tax_attention_lookup(old ,i ,'''decoder''' ,'''self_attention''' )
_a : Optional[Any] = layer_norm
_a : Dict = k.T
_a : str = o.T
_a : str = q.T
_a : List[str] = v.T
# Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
            k , o , q , v = tax_attention_lookup(old ,i ,'''decoder''' ,'''encoder_decoder_attention''' )
_a : Optional[Any] = layer_norm
_a : Optional[int] = k.T
_a : Dict = o.T
_a : str = q.T
_a : int = v.T
# Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_mlp_layer_norm''' )
            wi , wo = tax_mlp_lookup(old ,i ,'''decoder''' ,split_mlp_wi )
_a : Optional[Any] = layer_norm
if split_mlp_wi:
_a : List[str] = wi[0].T
_a : List[Any] = wi[1].T
else:
_a : Dict = wi.T
_a : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
                _a : Tuple = tax_relpos_bias_lookup(old ,i ,'''decoder''' ).T
_a : Tuple = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_a : Any = old['''decoder/logits_dense/kernel'''].T
return new
def make_state_dict( converted_params ,is_encoder_only ):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']
return state_dict
def load_tax_weights_in_ta( model ,config ,tax_checkpoint_path ,is_encoder_only ,scalable_attention ):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables ,num_layers=config.num_layers ,is_encoder_only=is_encoder_only ,scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted ,is_encoder_only )
    model.load_state_dict(state_dict ,strict=True )
def convert_tax_checkpoint_to_pytorch( tax_checkpoint_path ,config_file ,pytorch_dump_path ,is_encoder_only: bool = False ,scalable_attention: bool = False ,):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config )
    else:
        model = UMT5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model ,config ,tax_checkpoint_path ,is_encoder_only ,scalable_attention )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('''Done''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 14 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key( k ):
    """Map a ParlAI parameter name onto the corresponding HF Blenderbot name."""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name ,hf_name )
    if k.startswith('''encoder''' ):
        k = k.replace('''.attn''' ,'''.self_attn''' )
        k = k.replace('''norm1''' ,'''self_attn_layer_norm''' )
        k = k.replace('''norm2''' ,'''final_layer_norm''' )
    elif k.startswith('''decoder''' ):
        k = k.replace('''norm1''' ,'''self_attn_layer_norm''' )
        k = k.replace('''norm2''' ,'''encoder_attn_layer_norm''' )
        k = k.replace('''norm3''' ,'''final_layer_norm''' )
return k
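# e.g. rename_state_dict_key("encoder.attention.q_lin.weight") -> "encoder.self_attn.q_proj.weight"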
def rename_layernorm_keys( sd ):
    """Rename layernorm_embedding -> layer_norm for Blenderbot-3B checkpoints."""
    keys = [
'''model.encoder.layernorm_embedding.weight''',
'''model.encoder.layernorm_embedding.bias''',
'''model.decoder.layernorm_embedding.weight''',
'''model.decoder.layernorm_embedding.bias''',
]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('''layernorm_embedding''' ,'''layer_norm''' )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['''START''']
@torch.no_grad()
def convert_parlai_checkpoint( checkpoint_path ,pytorch_dump_folder_path ,config_json ):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the HF format."""
    model = torch.load(checkpoint_path ,map_location='''cpu''' )
    sd = model['''model''']
    cfg = BlenderbotConfig.from_json_file(config_json )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping ,strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 14 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = '''Usage of script: script_name <size_of_canvas:int>'''
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas( size: int ) -> list[list[bool]]:
    """Create a blank size x size canvas."""
    canvas = [[False for i in range(size )] for j in range(size )]
    return canvas
def seed( canvas: list[list[bool]] ) -> None:
    """Randomly seed every cell of the canvas."""
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )
def run( canvas: list[list[bool]] ) -> list[list[bool]]:
    """Advance the canvas one generation and return the new state."""
    current_canvas = np.array(canvas )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            next_gen_canvas[r][c] = __judge_point(
                pt ,current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point( pt: bool ,neighbours: list[list[bool]] ) -> bool:
    """Apply Conway's rules to one cell given its (up to) 3x3 neighbourhood."""
    alive = 0
    dead = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig , ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['''w''', '''k'''])
    try:
        while True:
            c = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 14 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class lowerCamelCase_ ( unittest.TestCase ):
    def setUp( self ):
"""simple docstring"""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
    def tearDown( self ):
        """simple docstring"""
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        """simple docstring"""
        code = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_1_9 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , '''new_code.py''' )
        with open(fname , '''w''' , newline='''\n''' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , '''r''' ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers( self ):
        """simple docstring"""
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
        self.assertEqual(code , REFERENCE_CODE )
    def test_copy_consistency( self ):
        """simple docstring"""
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub('''Bert''' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , REFERENCE_CODE , overwrite_result=re.sub('''DDPM''' , '''Test''' , REFERENCE_CODE ) , )
| 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "funnel"
UpperCAmelCase__ : Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
    def __init__( self , vocab_size=3_0_5_2_2 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=7_6_8 , n_head=1_2 , d_head=6_4 , d_inner=3_0_7_2 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1e-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs )
@property
    def num_hidden_layers( self ):
return sum(self.block_sizes )
@num_hidden_layers.setter
    def num_hidden_layers( self , value ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
    def num_blocks( self ):
return len(self.block_sizes )
@num_blocks.setter
    def num_blocks( self , value ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 14 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
__snake_case = {
'''yjernite/retribert-base-uncased''': 5_1_2,
}
__snake_case = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class __lowerCamelCase (_a ):
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = PRETRAINED_INIT_CONFIGURATION
_lowercase = RetriBertTokenizer
_lowercase = ["""input_ids""", """attention_mask"""]
    def __init__( self,vocab_file=None,tokenizer_file=None,do_lower_case=True,unk_token="[UNK]",sep_token="[SEP]",pad_token="[PAD]",cls_token="[CLS]",mask_token="[MASK]",tokenize_chinese_chars=True,strip_accents=None,**kwargs,):
        '''simple docstring'''
        super().__init__(
            vocab_file,tokenizer_file=tokenizer_file,do_lower_case=do_lower_case,unk_token=unk_token,sep_token=sep_token,pad_token=pad_token,cls_token=cls_token,mask_token=mask_token,tokenize_chinese_chars=tokenize_chinese_chars,strip_accents=strip_accents,**kwargs,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase',do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents',strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars',tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers,normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self,token_ids_0,token_ids_1=None ):
        '''Build model inputs by adding the [CLS] and [SEP] special tokens.'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self,token_ids_0: List[int],token_ids_1: Optional[List[int]] = None ):
        '''Zeros for the first sequence, ones for the (optional) second sequence.'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self,save_directory: str,filename_prefix: Optional[str] = None ):
        '''Save the fast tokenizer's model files to `save_directory`.'''
        files = self._tokenizer.model.save(save_directory,name=filename_prefix )
        return tuple(files )
| 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : int = "mobilenet_v1"
    def __init__( self , num_channels=3 , image_size=2_2_4 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs , ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : str = version.parse("1.11" )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
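        # classification exports logits; the bare model exports hidden states and a pooled output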
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
    def atol_for_validation( self ) -> float:
return 1e-4
| 14 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image( image: Union[List, PIL.Image.Image, torch.Tensor] ):
    warnings.warn(
        '''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        w , h = image[0].size
        w , h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask( mask: Union[List, PIL.Image.Image, torch.Tensor] ) -> torch.Tensor:
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0] , PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class lowerCamelCase__ ( DiffusionPipeline ):
    """simple docstring"""

    unet : UNet2DModel
    scheduler : RePaintScheduler

    def __init__( self : Union[str, Any] , unet : UNet2DModel , scheduler : RePaintScheduler ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self : Optional[int] , image : Union[torch.Tensor, PIL.Image.Image] , mask_image : Union[torch.Tensor, PIL.Image.Image] , num_inference_steps : int = 250 , eta : float = 0.0 , jump_length : int = 10 , jump_n_sample : int = 10 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )

        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
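# Hedged usage sketch for the RePaint pipeline above (the class keeps this
# snippet's obfuscated name; the checkpoint id and file names are assumptions):
# pipe = lowerCamelCase__.from_pretrained("some/repaint-compatible-checkpoint")
# out = pipe(image=original_pil, mask_image=mask_pil, num_inference_steps=250,
#            jump_length=10, jump_n_sample=10)
# out.images[0].save("inpainted.png")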
NUMBERS_PLUS_LETTER = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''


def is_spain_national_id( spanish_id : str ) -> bool:
    """simple docstring"""
    if not isinstance(spanish_id , str ):
        msg = F"""Expected string as input, found {type(spanish_id ).__name__}"""
        raise TypeError(msg )

    spanish_id_clean = spanish_id.replace('''-''' , '''''' ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER )

    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER ) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER )

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
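    # Illustrative check: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z",
    # so "12345678Z" is structurally valid (example value, not a real ID).
    print(is_spain_national_id('''12345678Z''' ))  # True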
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    # a_i represents the term as b * 10^k + c;
    # ds_b is digitsum(b) and c the low-order digits as an integer.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]

                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)

        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)

    return diff, i - start_i


def add(digits, k, addend):
    # adds addend into the digit array `digits`, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15):
    # returns the n-th term of the digit-sum sequence a(i+1) = a(i) + digitsum(a(i))
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
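    # Small-n sanity check (assumed values): the sequence runs
    # 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ... so the 10th term is 62;
    # the default argument above is n = 10**15.
    print(f"""{solution(10) = }""")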
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort( a , start , end ):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count


def _in_place_partition( a , start , end ):
    """simple docstring"""
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]: # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100 # 100 elements are to be sorted
mu, sigma = 0, 1 # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)

outfile.seek(0) # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
class a ( a__ , unittest.TestCase ):
snake_case__ = ByTaTokenizer
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
        tokenizer = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def UpperCamelCase__ ( self , **_snake_case ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=20 , _snake_case=5 ):
"""simple docstring"""
lowerCAmelCase = []
for i in range(len(_snake_case ) ):
try:
lowerCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=_snake_case )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase = list(filter(lambda _snake_case : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _snake_case ) )
lowerCAmelCase = list(filter(lambda _snake_case : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_snake_case ) , _snake_case ) )
if max_length is not None and len(_snake_case ) > max_length:
lowerCAmelCase = toks[:max_length]
if min_length is not None and len(_snake_case ) < min_length and len(_snake_case ) > 0:
while len(_snake_case ) < min_length:
lowerCAmelCase = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase = tokenizer.decode(_snake_case , clean_up_tokenization_spaces=_snake_case )
if " " not in output_txt and len(_snake_case ) > 1:
lowerCAmelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_snake_case )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_snake_case )
)
if with_prefix_space:
lowerCAmelCase = ' ' + output_txt
lowerCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
return output_txt, output_ids
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.ta_base_tokenizer
lowerCAmelCase = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
lowerCAmelCase = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.ta_base_tokenizer
lowerCAmelCase = 'Unicode €.'
lowerCAmelCase = tokenizer(_snake_case )
lowerCAmelCase = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['input_ids'] , _snake_case )
# decoding
lowerCAmelCase = tokenizer.decode(_snake_case )
self.assertEqual(_snake_case , 'Unicode €.</s>' )
lowerCAmelCase = tokenizer('e è é ê ë' )
lowerCAmelCase = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['input_ids'] , _snake_case )
# decoding
lowerCAmelCase = tokenizer.decode(_snake_case )
self.assertEqual(_snake_case , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.ta_base_tokenizer
lowerCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCAmelCase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
lowerCAmelCase = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
if FRAMEWORK != "jax":
lowerCAmelCase = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_snake_case , _snake_case )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.ta_base_tokenizer
lowerCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCAmelCase = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _snake_case )
self.assertIn('attention_mask' , _snake_case )
self.assertNotIn('decoder_input_ids' , _snake_case )
self.assertNotIn('decoder_attention_mask' , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.ta_base_tokenizer
lowerCAmelCase = [
'Summary of the text.',
'Another summary.',
]
lowerCAmelCase = tokenizer(
text_target=_snake_case , max_length=32 , padding='max_length' , truncation=_snake_case , return_tensors=_snake_case )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.ta_base_tokenizer
lowerCAmelCase = ['A long paragraph for summarization. </s>']
lowerCAmelCase = ['Summary of the text. </s>']
# fmt: off
lowerCAmelCase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
lowerCAmelCase = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
lowerCAmelCase = tokenizer(_snake_case , text_target=_snake_case )
self.assertEqual(_snake_case , batch['input_ids'][0] )
self.assertEqual(_snake_case , batch['labels'][0] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = ' He is very happy, UNwant\u00E9d,running'
lowerCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
tokenizer.save_pretrained(_snake_case )
lowerCAmelCase = tokenizer.__class__.from_pretrained(_snake_case )
lowerCAmelCase = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
shutil.rmtree(_snake_case )
lowerCAmelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCAmelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCAmelCase = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
tokenizer.save_pretrained(_snake_case )
lowerCAmelCase = tokenizer.__class__.from_pretrained(_snake_case )
lowerCAmelCase = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase = tokenizer.__class__.from_pretrained(_snake_case , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_snake_case )
with open(os.path.join(_snake_case , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
lowerCAmelCase = json.load(_snake_case )
with open(os.path.join(_snake_case , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
lowerCAmelCase = json.load(_snake_case )
lowerCAmelCase = [F'<extra_id_{i}>' for i in range(1_25 )]
lowerCAmelCase = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCAmelCase = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_snake_case , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_snake_case , _snake_case )
with open(os.path.join(_snake_case , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_snake_case , _snake_case )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase = tokenizer_class.from_pretrained(
_snake_case , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_snake_case )]
lowerCAmelCase = tokenizer_class.from_pretrained(
_snake_case , additional_special_tokens=_snake_case , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_snake_case )
lowerCAmelCase = tokenizer_class.from_pretrained(_snake_case )
self.assertTrue(tokenizer.decode([2_55] ) == '' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizers(fast=_snake_case , do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCAmelCase = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
lowerCAmelCase = tokenizer.convert_tokens_to_string(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCAmelCase = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
lowerCAmelCase = 0
lowerCAmelCase = tokenizer.convert_ids_to_tokens(
_snake_case , skip_special_tokens=_snake_case )
for attr in attributes_list:
setattr(_snake_case , attr + '_id' , _snake_case )
self.assertEqual(getattr(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(getattr(_snake_case , attr + '_id' ) , _snake_case )
setattr(_snake_case , attr + '_id' , _snake_case )
self.assertEqual(getattr(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(getattr(_snake_case , attr + '_id' ) , _snake_case )
setattr(_snake_case , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(_snake_case , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(_snake_case , 'additional_special_tokens_ids' ) , [] )
setattr(_snake_case , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_snake_case , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_snake_case , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = MgpstrTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Any:
super().setUp()
# fmt: off
_a : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_a : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
def __lowercase ( self , **_a ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Tuple:
_a : List[str] = '''tester'''
_a : Optional[Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_a : Tuple = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_a : Tuple = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a , _a : int = self.get_input_output_texts(_a )
_a : List[str] = tokenizer.tokenize(_a )
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_a : int = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __lowercase ( self ) -> Optional[Any]:
pass
'''simple docstring'''
from manim import *
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""CPU""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(4 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""GPU""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
gpu.move_to([-1, -1, 0] )
self.add(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""Model""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.add(_lowercase )
_lowerCAmelCase = []
for i, rect in enumerate(_lowercase ):
rect.set_stroke(_lowercase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowerCAmelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_lowercase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_lowercase , buff=0.0 )
self.add(_lowercase )
cpu_targs.append(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""Loaded Checkpoint""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , aligned_edge=_lowercase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_lowerCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowercase , _lowercase )
_lowerCAmelCase = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_lowerCAmelCase = MarkupText(
F'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase ) , Write(_lowercase ) )
self.play(Write(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) )
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, rect in enumerate(_lowercase ):
_lowerCAmelCase = fill.copy().set_fill(_lowercase , opacity=0.7 )
target.move_to(_lowercase )
first_animations.append(GrowFromCenter(_lowercase , run_time=1 ) )
_lowerCAmelCase = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
self.play(*_lowercase )
self.play(*_lowercase )
self.wait()
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
_a : int = 0
def __lowercase ( self ) -> List[str]:
_a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : Optional[Any] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
_a : Any = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Dict = CLIPConfig()
# Create a dummy config file with image_proceesor_type
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : List[str] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_a : Tuple = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_a : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Any:
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
_a : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowercase ( self ) -> List[Any]:
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : List[str] = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def __lowercase ( self ) -> Dict:
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_a : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowercase ( self ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_a : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
_a : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowercase ( self ) -> Dict:
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = Path(_a ) / '''preprocessor_config.json'''
_a : int = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : int = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowercase ( self ) -> Union[str, Any]:
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_a : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_a : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "LayoutLMv3ImageProcessor"
lowerCamelCase_ = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self :Dict , __A :Union[str, Any]=None , __A :Union[str, Any]=None , **__A :Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __A , )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" )
SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__A , __A )
def __call__( self :Dict , __A :List[str] , __A :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __A :Union[List[List[int]], List[List[List[int]]]] = None , __A :Optional[Union[List[int], List[List[int]]]] = None , __A :bool = True , __A :Union[bool, str, PaddingStrategy] = False , __A :Union[bool, str, TruncationStrategy] = None , __A :Optional[int] = None , __A :int = 0 , __A :Optional[int] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , __A :bool = False , __A :bool = False , __A :bool = False , __A :bool = False , __A :bool = True , __A :Optional[Union[str, TensorType]] = None , **__A :Optional[Any] , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
SCREAMING_SNAKE_CASE__ = self.image_processor(images=__A , return_tensors=__A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__A , __A ):
SCREAMING_SNAKE_CASE__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
SCREAMING_SNAKE_CASE__ = features["""words"""]
SCREAMING_SNAKE_CASE__ = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
# add pixel values
SCREAMING_SNAKE_CASE__ = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
SCREAMING_SNAKE_CASE__ = self.get_overflowing_images(__A , encoded_inputs["""overflow_to_sample_mapping"""] )
SCREAMING_SNAKE_CASE__ = images
return encoded_inputs
def _snake_case ( self :str , __A :Optional[int] , __A :int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__A ) != len(__A ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
f''' {len(__A )} and {len(__A )}''' )
return images_with_overflow
def _snake_case ( self :List[Any] , *__A :int , **__A :Optional[int] ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :Tuple , *__A :int , **__A :str ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self :str ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _snake_case ( self :List[Any] ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , )
return self.image_processor_class
@property
def _snake_case ( self :Dict ) -> Any:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , )
        return self.image_processor
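# Hedged usage sketch for the processor above (the class keeps this snippet's
# obfuscated name; upstream it corresponds to LayoutLMv3Processor, and the
# checkpoint id and `document_image` variable are assumptions):
# processor = UpperCamelCase_.from_pretrained("microsoft/layoutlmv3-base")
# encoding = processor(images=document_image, return_tensors="pt")
# encoding keys follow model_input_names: input_ids, bbox, attention_mask, pixel_values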
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree( root : TreeNode | None ) -> bool:
    """simple docstring"""

    def is_valid_tree( node : TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )

    if not is_valid_tree(root ):
        raise ValueError(
            '''Each node should be type of TreeNode and data should be float.''' )

    def is_binary_search_tree_recursive_check(
        node : TreeNode | None , left_bound : float , right_bound : float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )

    return is_binary_search_tree_recursive_check(root , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
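    # Illustrative tree (values are assumptions for the example): 2.0 at the
    # root, 1.0 as left child, 3.0 as right child -> a valid BST.
    root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    print(is_binary_search_tree(root))  # True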
"""simple docstring"""
def lucas_lehmer_test( p : int ) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError('p should not be less than 2!' )
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
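    # The test declares M_p = 2**p - 1 prime iff s_(p-2) % M_p == 0 with
    # s_0 = 4 and s_k = s_(k-1)**2 - 2; above, M_7 = 127 is prime (True)
    # and M_11 = 2047 = 23 * 89 is composite (False).
    print(lucas_lehmer_test(13))  # M_13 = 8191 is prime -> True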
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate( initial_vectors : list[numpy.ndarray] , steps : int ) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors


def iteration_step( vectors : list[numpy.ndarray] ) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors


def rotate( vector : numpy.ndarray , angle_in_degrees : float ) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees )
    c , s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )


def plot( vectors : list[numpy.ndarray] ) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('''equal''' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates , y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
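    # Each iteration replaces every segment with four segments of one third
    # the length, so after 5 iterations the three initial edges become
    # 3 * 4**5 = 3072 segments and the perimeter grows by a factor of (4/3)**5.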
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack( value : list[int] , weight : list[int] , capacity : int ) -> tuple[float, list[float]]:
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )

    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
    doctest.testmod()
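    # Illustrative run (item values/weights are assumptions for the example):
    # ratios are 6, 5, 4, so items are taken greedily in that order.
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    print(max_value)   # 240.0
    print(fractions)   # [1, 1, 0.6666666666666666]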
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase ( __a : Tuple ,__a : Dict ,__a : List[str] ,__a : Optional[Any] ,__a : Tuple ) -> Dict:
"""simple docstring"""
with open(__a ) as metadata_file:
_a : Optional[Any] = json.load(__a )
_a : List[Any] = LukeConfig(use_entity_aware_attention=__a ,**metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_a : Optional[Any] = torch.load(__a ,map_location='''cpu''' )['''module''']
# Load the entity vocab file
_a : Any = load_original_entity_vocab(__a )
# add an entry for [MASK2]
_a : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_a : Dict = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_a : Optional[int] = AddedToken('''<ent>''' ,lstrip=__a ,rstrip=__a )
_a : Tuple = AddedToken('''<ent2>''' ,lstrip=__a ,rstrip=__a )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__a )
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''r''' ) as f:
_a : List[str] = json.load(__a )
_a : Tuple = '''MLukeTokenizer'''
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''w''' ) as f:
json.dump(__a ,__a )
with open(os.path.join(__a ,MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) ,'''w''' ) as f:
json.dump(__a ,__a )
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
# Initialize the embeddings of the special tokens
_a : str = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_a : Tuple = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_a : Any = state_dict['''embeddings.word_embeddings.weight''']
_a : Optional[int] = word_emb[ent_init_index].unsqueeze(0 )
_a : Any = word_emb[enta_init_index].unsqueeze(0 )
_a : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_a : Tuple = state_dict[bias_name]
_a : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_a : Optional[int] = decoder_bias[enta_init_index].unsqueeze(0 )
_a : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_a : Tuple = F"""encoder.layer.{layer_index}.attention.self."""
_a : List[Any] = state_dict[prefix + matrix_name]
_a : Dict = state_dict[prefix + matrix_name]
_a : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_a : Union[str, Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
_a : Optional[int] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_a : int = state_dict['''entity_predictions.bias''']
_a : int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_a : Optional[int] = LukeForMaskedLM(config=__a ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_a : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_a : Optional[int] = state_dict[key]
else:
_a : Tuple = state_dict[key]
_a , _a : int = model.load_state_dict(__a ,strict=__a )
if set(__a ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a ,task='''entity_classification''' )
_a : int = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_a : List[Any] = (0, 9)
_a : Tuple = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : int = model(**__a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : List[str] = torch.Size((1, 33, 768) )
_a : Union[str, Any] = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : str = torch.Size((1, 1, 768) )
_a : List[Any] = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
_a : Dict = '''Tokyo is the capital of <mask>.'''
_a : List[str] = (24, 30)
_a : Optional[int] = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : Optional[Any] = model(**__a )
_a : Any = encoding['''input_ids'''][0].tolist()
_a : Optional[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_a : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__a )
_a : Any = outputs.entity_logits[0][0].argmax().item()
_a : Optional[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__a ) )
model.save_pretrained(__a )
def __UpperCAmelCase ( __a : List[Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
_a : int = [json.loads(__a ) for line in open(__a )]
_a : List[Any] = {}
for entry in data:
_a : int = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_a : List[Any] = entity_id
break
_a : Dict = F"""{language}:{entity_name}"""
_a : int = entity_id
return new_mapping
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
a__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : str = ["image_processor", "tokenizer"]
A__ : str = "LayoutLMv3ImageProcessor"
A__ : Tuple = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self : Optional[int] , _snake_case : Optional[int]=None , _snake_case : Optional[int]=None , **_snake_case : int ):
"""simple docstring"""
A__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _snake_case , )
A__ = kwargs.pop('feature_extractor' )
A__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_snake_case , _snake_case )
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['pixel_values'] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        """simple docstring"""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                F''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
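# Usage sketch (illustrative, not part of the original module; the checkpoint
# id below is an assumption -- any LayoutLMv3 checkpoint works the same way):
#
#   from transformers import LayoutLMv3Processor
#   from PIL import Image
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   image = Image.open("document.png").convert("RGB")
#   # with apply_ocr=True (the image processor default), words and boxes come
#   # from Tesseract, so only the image needs to be passed:
#   encoding = processor(image, return_tensors="pt")
#   # encoding carries input_ids, bbox, attention_mask and pixel_values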
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        results = spearmanr(predictions , references )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
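# Quick usage sketch (illustrative; mirrors the docstring examples above):
#
#   metric = datasets.load_metric("spearmanr")
#   out = metric.compute(references=[1, 2, 3], predictions=[3, 2, 1], return_pvalue=True)
#   # out["spearmanr"] == -1.0 for a perfectly anti-monotonic ranking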
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments :
    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES )}, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class DataTrainingArguments :
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."} )
    train_data_files: Optional[str] = field(
        default=None, metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        }, )
    eval_data_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word mask in Chinese."}, )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."}, )
    line_by_line: bool = field(
        default=False, metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."} )
    mlm_probability: float = field(
        default=0.1_5, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
    plm_probability: float = field(
        default=1 / 6, metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        }, )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
    block_size: int = field(
        default=-1, metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def get_dataset( args : DataTrainingArguments , tokenizer : PreTrainedTokenizer , evaluate : bool = False , cache_dir : Optional[str] = None , ):
    def _dataset(file_path , ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it,and load it from here, using --tokenizer_name''' )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
# Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['''eval_loss'''] )
        result = {'''perplexity''': perplexity}
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key in sorted(result.keys() ):
                    logger.info(''' %s = %s''' , key , str(result[key] ) )
                    writer.write('''%s = %s\n''' % (key, str(result[key] )) )
        results.update(result )
return results
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload : bytes , sampling_rate : int ) -> np.array:
    """simple docstring"""
    ar = F"""{sampling_rate}"""
    ac = '''1'''
    format_for_conversion = '''f32le'''
    ffmpeg_command = [
        '''ffmpeg''',
        '''-i''',
        '''pipe:0''',
        '''-ac''',
        ac,
        '''-ar''',
        ar,
        '''-f''',
        format_for_conversion,
        '''-hide_banner''',
        '''-loglevel''',
        '''quiet''',
        '''pipe:1''',
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('''Malformed soundfile''' )
    return audio
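# Example (sketch; assumes an `ffmpeg` binary on PATH and a local audio file):
#
#   with open("sample.flac", "rb") as f:
#       audio = ffmpeg_read(f.read(), sampling_rate=16_000)
#   # `audio` is a mono float32 numpy array resampled to 16 kHz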
def ffmpeg_microphone(sampling_rate : int , chunk_length_s : float , format_for_conversion : str = "f32le" , ):
    """simple docstring"""
    ar = F"""{sampling_rate}"""
    ac = '''1'''
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    system = platform.system()
    if system == "Linux":
        format_ = '''alsa'''
        input_ = '''default'''
    elif system == "Darwin":
        format_ = '''avfoundation'''
        input_ = ''':0'''
    elif system == "Windows":
        format_ = '''dshow'''
        input_ = '''default'''
    ffmpeg_command = [
        '''ffmpeg''',
        '''-f''',
        format_,
        '''-i''',
        input_,
        '''-ac''',
        ac,
        '''-ar''',
        ar,
        '''-f''',
        format_for_conversion,
        '''-fflags''',
        '''nobuffer''',
        '''-hide_banner''',
        '''-loglevel''',
        '''quiet''',
        '''pipe:1''',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate : int , chunk_length_s : float , stream_chunk_s : Optional[int] = None , stride_length_s : Optional[Union[Tuple[float, float], float]] = None , format_for_conversion : str = "f32le" , ):
    """simple docstring"""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item['''raw'''] = np.frombuffer(item['''raw'''] , dtype=dtype )
        item['''stride'''] = (
            item['''stride'''][0] // size_of_sample,
            item['''stride'''][1] // size_of_sample,
        )
        item['''sampling_rate'''] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
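# Example (sketch; needs a microphone and the matching ffmpeg capture device
# -- alsa, avfoundation or dshow -- for the current OS):
#
#   for chunk in ffmpeg_microphone_live(16_000, chunk_length_s=5.0, stream_chunk_s=1.0):
#       print(chunk["raw"].shape, chunk["stride"], chunk.get("partial"))
#       # partial chunks arrive every second; feed them to an ASR pipeline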
def chunk_bytes_iter(iterator , chunk_len : int , stride : Tuple[int, int] , stream : bool = False ):
    """simple docstring"""
    acc = b''''''
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'''raw''': acc[:chunk_len], '''stride''': stride}
                if stream:
                    item['''partial'''] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'''raw''': acc, '''stride''': (_stride_left, 0)}
        if stream:
            item['''partial'''] = False
        yield item
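# Example (sketch): re-chunking an in-memory byte stream with a 2-byte stride
# on each side; every yielded chunk overlaps its neighbours by the stride.
#
#   blocks = iter([b"0123456789", b"abcdefghij"])
#   for item in chunk_bytes_iter(blocks, chunk_len=8, stride=(2, 2)):
#       print(item["raw"], item["stride"])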
def _ffmpeg_stream(ffmpeg_command , buflen : int ):
    """simple docstring"""
    bufsize = 2**24  # 16 MiB
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
return 3_2
@property
    def time_input_dim( self ):
return 3_2
@property
    def block_out_channels_a( self ):
return self.time_input_dim
@property
    def time_embed_dim( self ):
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
return 1_0_0
@property
    def dummy_tokenizer( self ):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
return text_encoder
@property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='''epsilon''' , thresholding=False , )
        components = {
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
        # create mask
        mask = np.ones((6_4, 6_4) , dtype=np.float32 )
        mask[:3_2, :3_2] = 0
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''horse''',
            '''image''': init_image,
            '''mask_image''': mask,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 6_4,
            '''width''': 6_4,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 4.0,
            '''output_type''': '''np''',
        }
return inputs
    def test_kandinsky_inpaint( self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        mask = np.ones((7_6_8, 7_6_8) , dtype=np.float32 )
        mask[:2_5_0, 2_5_0:-2_5_0] = 0
        prompt = '''a hat'''
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image , expected_image )
from manim import *
class _snake_case ( Scene ):
    def construct(self ):
'''simple docstring'''
lowercase__ : str = Rectangle(height=0.5 , width=0.5)
lowercase__ : Tuple = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0)
lowercase__ : List[Any] = [mem.copy() for i in range(6)]
lowercase__ : Tuple = [mem.copy() for i in range(6)]
lowercase__ : List[str] = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
lowercase__ : Tuple = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
lowercase__ : List[Any] = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
lowercase__ : Dict = Text("""CPU""" , font_size=24)
lowercase__ : List[str] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_)
cpu.move_to([-2.5, -0.5, 0])
self.add(SCREAMING_SNAKE_CASE_)
lowercase__ : str = [mem.copy() for i in range(1)]
lowercase__ : Any = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
lowercase__ : Optional[int] = Text("""GPU""" , font_size=24)
lowercase__ : Union[str, Any] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_)
gpu.align_to(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
gpu.set_x(gpu.get_x() - 1)
self.add(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = [mem.copy() for i in range(6)]
lowercase__ : str = VGroup(*SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0)
lowercase__ : Optional[Any] = Text("""Model""" , font_size=24)
lowercase__ : Tuple = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_)
model.move_to([3, -1.0, 0])
self.play(
Create(SCREAMING_SNAKE_CASE_ , run_time=1) , Create(SCREAMING_SNAKE_CASE_ , run_time=1) , Create(SCREAMING_SNAKE_CASE_ , run_time=1) , )
lowercase__ : Optional[Any] = MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
lowercase__ : Any = Square(side_length=2.2)
key.move_to([-5, 2, 0])
lowercase__ : Optional[int] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
step_a.move_to([2, 2, 0])
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=2.5) , Write(SCREAMING_SNAKE_CASE_) , Write(SCREAMING_SNAKE_CASE_))
self.add(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = []
lowercase__ : Union[str, Any] = []
lowercase__ : str = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_):
lowercase__ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0.0).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7)
cpu_target.move_to(SCREAMING_SNAKE_CASE_)
cpu_target.generate_target()
lowercase__ : Union[str, Any] = 0.4_6 / 4
lowercase__ : Tuple = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=SCREAMING_SNAKE_CASE_)
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0)
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0)
cpu_targs.append(SCREAMING_SNAKE_CASE_)
first_animations.append(rect.animate(run_time=0.5).set_stroke(SCREAMING_SNAKE_CASE_))
second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=1.5))
self.play(*SCREAMING_SNAKE_CASE_)
self.play(*SCREAMING_SNAKE_CASE_)
self.wait()
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string ):
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
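# Example invocation (sketch; the script and file names below are illustrative):
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path control_sd15_canny.pth \
#       --original_config_file cldm_v15.yaml \
#       --dump_path ./controlnet-canny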
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
return image
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
        v_bias = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
        # next, set bias in the state dict (the k bias is zero)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blipa_config(model_name , eos_token_id=None ):
    image_size = 3_64 if 'coco' in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
    )
    eos_token_id = tokenizer('\n' , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
    name , type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...' )
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
original_model.eval()
print('Done!' )
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('Qformer.bert' ):
            key = key.replace('Qformer.bert' , 'qformer' )
        if "attention.self" in key:
            key = key.replace('self' , 'attention' )
        if "opt_proj" in key:
            key = key.replace('opt_proj' , 'language_projection' )
        if "t5_proj" in key:
            key = key.replace('t5_proj' , 'language_projection' )
        if key.startswith('opt' ):
            key = key.replace('opt' , 'language' )
        if key.startswith('t5' ):
            key = key.replace('t5' , 'language' )
        state_dict[key] = val
# read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='pt' ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values , pixel_values )
    original_model.to(device )
    hf_model.to(device )
with torch.no_grad():
if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
    prompt = ''
    input_ids = tokenizer(prompt , return_tensors='pt' ).input_ids.to(device )
    original_outputs = original_model.generate({'image': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('Original generation:' , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('HF generation:' , output_text )
if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
processor.push_to_hub(F'nielsr/{model_name}' )
hf_model.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
A__ : Optional[int] = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
A__ : Any = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
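# Example invocation (sketch; the script name is illustrative):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b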
class Things :
    """simple docstring"""
    def __init__( self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ):
        return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
    def get_value( self ):
        return self.value
    def get_name( self ):
        return self.name
    def get_weight( self ):
        return self.weight
    def value_weight( self ):
        return self.value / self.weight
def build_menu(name , value , weight ):
    """simple docstring"""
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu


def greedy(items , max_cost , key_func ):
    """simple docstring"""
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
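# Example (sketch): greedily picking menu items by value/weight ratio under a
# 60-unit weight cap. Names and numbers below are illustrative.
#
#   food = ["Burger", "Pizza", "Coca Cola"]
#   value = [80, 100, 60]
#   weight = [40, 10, 10]
#   menu = build_menu(food, value, weight)
#   chosen, total_value = greedy(menu, 60.0, Things.value_weight)
#   # `chosen` holds the selected Things, `total_value` their summed value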
def test_greedy():
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n    only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
'''simple docstring'''
    def _info(self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
    def _compute(self , predictions , references , return_pvalue=False ):
        """simple docstring"""
        results = spearmanr(predictions , references )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester :
    """simple docstring"""
    def __init__( self , parent , batch_size=1_3 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=2_2_4 , num_labels=1_0_0_0 , layer_depths=[3, 3, 6, 4] , embed_dims=[4_8, 5_6, 1_1_2, 2_2_0] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=True , layer_scale_init_value=1e-5 , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Tuple:
((_a) , (_a) , (_a)) : Optional[int] = self.prepare_config_and_inputs()
_a : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SwiftFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) -> Optional[int]:
        self.model_tester = SwiftFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
    def test_config( self ) -> int:
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
    def test_inputs_embeds( self ) -> Union[str, Any]:
        pass
    def test_model_common_attributes( self ) -> Dict:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ) -> str:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> Optional[Any]:
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason='''SwiftFormer does not output attentions''' )
    def test_attention_outputs( self ) -> List[Any]:
        pass
    def test_hidden_states_output( self ) -> int:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states ) , expected_num_stages )  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_initialization( self ) -> str:
        def _config_zero_init(config ):
            configs_no_init = copy.deepcopy(config )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init , key , 1e-10 )
                if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
                    setattr(configs_no_init , key , no_init_subconfig )
            return configs_no_init
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ) -> Optional[Any]:
        pass
def prepare_img() -> Optional[Any]:
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self ) -> str:
        return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ) -> Dict:
        model = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
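

# Quick sanity sketch of the stage shapes asserted in the hidden-states test
# above: with image_size=224 and embed_dims=[48, 56, 112, 220], the 56x56
# (=224/4) feature map is halved after every two blocks. Illustrative only;
# this helper is not used by the tests.
def _demo_swiftformer_stage_shapes():
    image_size, embed_dims = 224, [48, 56, 112, 220]
    side = lambda i: (image_size // 4) // 2 ** (i // 2)  # noqa: E731
    return [(embed_dims[i // 2], side(i), side(i)) for i in range(8)]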
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print( name , val , spaces=0 ):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0 , spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}"
        msg = fmt.format(name )
    # Print and recurse (if needed).
    if isinstance(val , dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k , val[k] , spaces + 2 )
    elif isinstance(val , torch.Tensor ):
        print(msg , ":" , val.size() )
    else:
        print(msg , ":" , val )
def fix_query_key_value_ordering( param , checkpoint_version , num_splits , num_heads , hidden_size ):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 2 )
        param = param.transpose(1 , 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 1 ).contiguous()
    param = param.view(*input_shape )
    return param
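

# A quick shape check for the permutation above (dimensions are made up for
# illustration; this helper is not called by the conversion itself):
def _demo_fix_qkv_ordering():
    num_heads, hidden_size, num_splits = 2, 4, 3
    # a v2.0 checkpoint stores [num_heads * num_splits * hidden_size, cols]
    param = torch.randn(num_heads * num_splits * hidden_size, 7)
    out = fix_query_key_value_ordering(param, 2.0, num_splits, num_heads, hidden_size)
    assert out.shape == param.shape  # rows are reordered, the shape is preserved
    return out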
def convert_megatron_checkpoint( args , input_state_dict , config ):
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args" , None )
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0 )
    if n_positions != config.n_positions:
        raise ValueError(
            F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(R"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key )
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1 ) )
        # The name of the operation.
        op_name = m.group(2 )
        # Is it a weight or a bias?
        weight_or_bias = m.group(3 )
        # The name of the layer.
        layer_name = F"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm" ):
            ln_name = "ln_1" if op_name.startswith("input" ) else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
                1 , 1 , n_positions , n_positions )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1E4 , dtype=torch.float16 )
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0 , 1 ).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0 , 1 )
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure" , action="store_true" )
    parser.add_argument(
        "path_to_checkpoint" , type=str , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
    parser.add_argument(
        "--config_file" , default="" , type=str , help="An optional config json file describing the pre-trained model." , )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint )
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
    if args.path_to_checkpoint.endswith(".zip" ):
        with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict , map_location="cpu" )
    else:
        input_state_dict = torch.load(args.path_to_checkpoint , map_location="cpu" )
    ds_args = input_state_dict.get("args" , None )
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=activation_function , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.0_2 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , )
    else:
        config = GPT2Config.from_json_file(args.config_file )
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting" )
    output_state_dict = convert_megatron_checkpoint(args , input_state_dict , config )
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None , output_state_dict )
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name )
    tokenizer_class = type(tokenizer ).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config" )
    config.save_pretrained(basename )
    # Save tokenizer based on args
    print(F"Adding {tokenizer_class} tokenizer files" )
    tokenizer.save_pretrained(basename )
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename , "pytorch_model.bin" )
    print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
    torch.save(output_state_dict , output_checkpoint_file )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
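

# Example invocation (the path below is a placeholder, not a real file):
#
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/release/mp_rank_00/model_optim_rng.pt
#
# The converted config, tokenizer files and pytorch_model.bin are written into
# the directory containing the input checkpoint.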
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def get_maskformer_config( model_name: str ) -> List[Any]:
    """simple docstring"""
    backbone_config = SwinConfig.from_pretrained(
        '''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = '''huggingface/label-files'''
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = '''maskformer-ade20k-full-id2label.json'''
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = '''ade20k-id2label.json'''
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = '''maskformer-coco-stuff-id2label.json'''
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = '''coco-panoptic-id2label.json'''
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = '''cityscapes-id2label.json'''
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = '''mapillary-vistas-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='''dataset''' ) ,'''r''' ) )
    config.id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys( config ) -> Tuple:
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key( dct ,old ,new ) -> Union[str, Any]:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict ,backbone_config ) -> Union[str, Any]:
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
            in_proj_bias = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[: dim]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[
                -dim :, :
            ]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
            # fmt: on
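

# Illustration of the fused-QKV split performed above, on a dummy projection
# (hypothetical dim=4; not called during conversion): rows [0:dim] are the
# query, [dim:2*dim] the key, and [-dim:] the value.
def _demo_qkv_split():
    dim = 4
    in_proj_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q = in_proj_weight[:dim, :]
    k = in_proj_weight[dim : dim * 2, :]
    v = in_proj_weight[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    return q, k, v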
def read_in_decoder_q_k_v( state_dict ,config ) -> List[Any]:
    """simple docstring"""
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"""] = in_proj_bias[:config.hidden_size]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"""] = in_proj_bias[:config.hidden_size]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img() -> torch.Tensor:
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name: str ,checkpoint_path: str ,pytorch_dump_folder_path: str ,push_to_hub: bool = False ) -> Union[str, Any]:
    """simple docstring"""
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path ,'''rb''' ) as f:
        data = pickle.load(f )
    state_dict = data['''model''']
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_swin_q_k_v(state_dict ,config.backbone_config )
    read_in_decoder_q_k_v(state_dict ,config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name ,param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict ,strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F"""Unexpected keys: {unexpected_keys}"""
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if '''ade''' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index ,reduce_labels=reduce_labels )
    inputs = image_processor(image ,return_tensors='''pt''' )
    outputs = model(**inputs )
    print('''Logits:''' ,outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,expected_logits ,atol=1E-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model and image processor to the hub...''' )
        model.push_to_hub(F"""nielsr/{model_name}""" )
        image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''maskformer-swin-tiny-ade''',
        type=str,
        help='''Name of the MaskFormer model you\'d like to convert''',
    )
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
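

# Example invocation (the script name and all paths below are placeholders):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade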
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0_0_0_1,
            """beta_end""": 0.0_2,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_variance_type( self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
    def test_batch_step_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] , dim=0 )
        timesteps = torch.arange(num_trained_timesteps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2
        assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
        assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
        assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
    def test_custom_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg="""`custom_timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
            scheduler.set_timesteps(timesteps=timesteps )
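

# Standalone sketch of the custom-timesteps API exercised above (the values are
# arbitrary but strictly descending, as the scheduler requires; this helper is
# not used by the tests):
def _demo_custom_timesteps():
    scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])
    return scheduler.timesteps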
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class XLMProphetNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ) -> int:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ) -> Any:
        token = '''[PAD]'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> str:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''[PAD]''' )
        self.assertEqual(vocab_keys[1] , '''[CLS]''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(vocab_keys ) , 1_0_1_2 )
    def test_vocab_size( self ) -> Union[str, Any]:
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
    def test_full_tokenizer( self ) -> str:
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
    @cached_property
    def big_tokenizer( self ) -> List[str]:
        return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
    @slow
    def test_tokenization_base_easy_symbols( self ) -> Tuple:
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenizer_integration( self ) -> str:
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
'''simple docstring'''
def solution(numerator: int = 1 , digit: int = 1000 ) -> int:
    '''Return the denominator d (numerator <= d <= digit) whose fraction
    numerator/d produces the longest sequence of distinct remainders during
    long division, i.e. the longest recurring decimal cycle (Project Euler 26).'''
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
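    # Worked example with a small limit: among denominators up to 10,
    # 1/7 = 0.(142857) has the longest recurring cycle, so the answer is 7.
    assert solution(1, 10) == 7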
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self ) -> Union[str, Any]:
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ) -> List[str]:
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer( self ) -> List[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 1_0, 8, 9] )
    def test_rust_and_python_full_tokenizers( self ) -> List[Any]:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
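

# Standalone usage sketch of the behaviour tested above (assumes transformers
# is installed; the vocab file is a temporary toy vocabulary built here, and
# this helper is not used by the test suite):
def _demo_wordpiece_split():
    import tempfile
    tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
    with tempfile.TemporaryDirectory() as tmp:
        vocab_file = os.path.join(tmp, "vocab.txt")
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write("".join(t + "\n" for t in tokens))
        tok = LxmertTokenizer(vocab_file)
        return tok.tokenize("UNwant\u00E9d,running")  # ['un', '##want', '##ed', ',', 'runn', '##ing']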
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin( FeatureExtractionSavingTestMixin ):
    # to overwrite at feature extractactor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
    @property
    def feat_extract_dict(self) -> List[str]:
        '''simple docstring'''
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self) -> List[str]:
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract , '''feature_size'''))
        self.assertTrue(hasattr(feat_extract , '''sampling_rate'''))
        self.assertTrue(hasattr(feat_extract , '''padding_value'''))
    def test_batch_feature(self) -> Optional[int]:
        '''simple docstring'''
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs , processed_features[input_name])))
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='''np''')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    @require_torch
    def test_batch_feature_pt(self) -> Union[str, Any]:
        '''simple docstring'''
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    @require_tf
    def test_batch_feature_tf(self) -> Tuple:
        '''simple docstring'''
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    def _check_padding(self , numpify=False) -> Union[str, Any]:
        '''simple docstring'''
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True
        def _inputs_are_equal(input_1 , input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1 , input_2):
                if not np.allclose(np.asarray(input_slice_1) , np.asarray(input_slice_2) , atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_UpperCamelCase = feat_extract.pad(__a , padding=__a)
_UpperCamelCase = input_a[input_name]
_UpperCamelCase = feat_extract.pad(__a , padding='''longest''')
_UpperCamelCase = input_a[input_name]
_UpperCamelCase = feat_extract.pad(__a , padding='''max_length''' , max_length=len(speech_inputs[-1]))
_UpperCamelCase = input_a[input_name]
_UpperCamelCase = feat_extract.pad(__a , padding='''longest''' , return_tensors='''np''')
_UpperCamelCase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__a):
feat_extract.pad(__a , padding='''max_length''')[input_name]
_UpperCamelCase = feat_extract.pad(
__a , padding='''max_length''' , max_length=__a , return_tensors='''np''')
_UpperCamelCase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__a))
self.assertTrue(_inputs_have_equal_length(__a))
self.assertTrue(_inputs_have_equal_length(__a))
self.assertTrue(_inputs_are_equal(__a , __a))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
_UpperCamelCase = feat_extract.pad(__a , pad_to_multiple_of=10)
_UpperCamelCase = input_a[input_name]
_UpperCamelCase = feat_extract.pad(__a , padding='''longest''' , pad_to_multiple_of=10)
_UpperCamelCase = input_a[input_name]
_UpperCamelCase = feat_extract.pad(
__a , padding='''max_length''' , pad_to_multiple_of=10 , max_length=__a)
_UpperCamelCase = input_a[input_name]
_UpperCamelCase = feat_extract.pad(
__a , padding='''max_length''' , pad_to_multiple_of=10 , max_length=__a , return_tensors='''np''' , )
_UpperCamelCase = input_a[input_name]
self.assertTrue(all(len(__a) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(__a , __a))
_UpperCamelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(__a) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
_UpperCamelCase = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1e-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1e-3)
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
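For reference outside the test harness, a minimal, self-contained sketch of the padding and truncation behavior exercised above (assumes transformers with Wav2Vec2FeatureExtractor, though any SequenceFeatureExtractor subclass behaves the same way; the input lengths are illustrative, not the tester's values):
import numpy as np
from transformers import BatchFeature, Wav2Vec2FeatureExtractor

# Construct the feature extractor locally; no checkpoint download is needed.
extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)

# Three mono waveforms of increasing length, mirroring the prepared inputs above.
speech_inputs = [np.ones(n, dtype=np.float32) for n in (400, 666, 932)]
features = BatchFeature({"input_values": speech_inputs})

# padding="longest" pads every sequence to the longest one in the batch.
longest = extractor.pad(features, padding="longest", return_tensors="np")
assert longest["input_values"].shape == (3, 932)

# pad_to_multiple_of rounds the padded length up to the next multiple.
multiple = extractor.pad(features, padding="longest", pad_to_multiple_of=10, return_tensors="np")
assert multiple["input_values"].shape == (3, 940)

# A max_length shorter than the longest input requires truncation=True;
# without it the batch stays ragged and cannot become a rectangular array.
truncated = extractor.pad(
    features, padding="max_length", max_length=400, truncation=True, return_tensors="np"
)
assert truncated["input_values"].shape == (3, 400)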
| 19 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
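A quick end-to-end sketch of the processor under test (it uses the same ZinengTang/tvlt-base checkpoint as setUp, so running it downloads that checkpoint; the call mirrors test_processor above):
import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")

audio = np.ones([12000])         # dummy waveform, as in test_feature_extractor
images = np.ones([3, 224, 224])  # dummy image, as in test_image_processor

inputs = processor(audio=audio, images=images)
print(sorted(inputs.keys()))
# ['audio_mask', 'audio_values', 'pixel_mask', 'pixel_values']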
| 14 | 0 |
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
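Each entry above is a self-contained model subpackage re-exported by transformers.models; a minimal sketch of reaching one of them (the config is built locally, no download involved):
from transformers import BertConfig
from transformers.models import bert

config = BertConfig()                  # default BERT-base style configuration
print(config.hidden_size)              # 768
print(bert.BertConfig is BertConfig)   # True: the same class, reachable both ways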
| 20 |
def harmonic_series(n_term: str) -> list:
    """Generate the Harmonic Series up to and including the n-th term, as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
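A short usage check, with the matching partial sum computed exactly via fractions.Fraction (the sum below is an illustration, not part of the original script):
from fractions import Fraction

print(harmonic_series("4"))  # ['1', '1/2', '1/3', '1/4']

# Exact partial sum H_4 = 1 + 1/2 + 1/3 + 1/4 = 25/12
print(sum(Fraction(1, k) for k in range(1, 5)))  # 25/12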
| 14 | 0 |