"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_A : Optional[int] = logging.getLogger(__name__)
@dataclass
class a__ :
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = 42
@dataclass
class a__ :
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = None
__lowerCAmelCase = None
class a__ ( a_ ):
__lowerCAmelCase = """train"""
__lowerCAmelCase = """dev"""
__lowerCAmelCase = """test"""
class a__ :
@staticmethod
def __magic_name__ ( _a , _a ):
raise NotImplementedError
@staticmethod
def __magic_name__ ( _a ):
raise NotImplementedError
@staticmethod
def __magic_name__ ( _a , _a , _a , _a , _a=False , _a="[CLS]" , _a=1 , _a="[SEP]" , _a=False , _a=False , _a=0 , _a=0 , _a=-100 , _a=0 , _a=True , ):
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0      0   0    1  1  1  1  1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0   0    0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        # xlnet has a cls token at the end
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        # roberta uses an extra separator b/w pairs of sentences
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                # xlnet has a cls token at the end
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                # roberta uses an extra separator b/w pairs of sentences
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
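
# Illustrative usage sketch (an addition, not part of the original module). The toy
# subclass, label set, and checkpoint name below are assumptions for demonstration
# only; a real task would parse CoNLL-style files from data_dir, and the tokenizer
# download requires network access.
from transformers import AutoTokenizer


class ToyNerTask(TokenClassificationTask):
    @staticmethod
    def read_examples_from_file(data_dir, mode):
        # One hard-coded sentence instead of parsing files from data_dir.
        return [
            InputExample(
                guid="toy-1",
                words=["Hugging", "Face", "is", "in", "NYC"],
                labels=["B-ORG", "I-ORG", "O", "O", "B-LOC"],
            )
        ]

    @staticmethod
    def get_labels(path):
        return ["O", "B-ORG", "I-ORG", "B-LOC"]


if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    features = ToyNerTask.convert_examples_to_features(
        ToyNerTask.read_examples_from_file(".", Split.train),
        ToyNerTask.get_labels(None),
        max_seq_length=16,
        tokenizer=tokenizer,
        pad_token=tokenizer.pad_token_id,
    )
    print(features[0].input_ids)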
"""simple docstring"""
def __magic_name__ ( __snake_case : list ) -> list:
if len(__snake_case ) < 2:
return collection
def circle_sort_util(__snake_case : list , __snake_case : int , __snake_case : int ) -> bool:
lowercase : List[Any] = False
if low == high:
return swapped
lowercase : Union[str, Any] = low
lowercase : str = high
while left < right:
if collection[left] > collection[right]:
lowercase , lowercase : Optional[Any] = (
collection[right],
collection[left],
)
lowercase : Tuple = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
lowercase , lowercase : str = (
collection[right + 1],
collection[left],
)
lowercase : Union[str, Any] = True
lowercase : Any = low + int((high - low) / 2 )
lowercase : Tuple = circle_sort_util(__snake_case , __snake_case , __snake_case )
lowercase : List[Any] = circle_sort_util(__snake_case , mid + 1 , __snake_case )
return swapped or left_swap or right_swap
lowercase : int = True
while is_not_sorted is True:
lowercase : int = circle_sort_util(__snake_case , 0 , len(__snake_case ) - 1 )
return collection
if __name__ == "__main__":
_A : str = input("""Enter numbers separated by a comma:\n""").strip()
_A : Dict = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
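
# Quick sanity checks (illustrative additions, not part of the original file):
assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert circle_sort([]) == []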
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
    import jax.numpy as jnp


class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Test whether `x` is a `torch.Tensor`, `tf.Tensor`, `jnp.ndarray`, or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Test whether `x` is a numpy array."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Test whether `x` is a torch tensor; safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Test whether `x` is a torch device; safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Test whether `x` is a torch dtype; safe to call even when torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Test whether `x` is a tensorflow tensor; safe to call even when tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Test whether `x` is a symbolic tensorflow tensor; safe to call even without tensorflow installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Test whether `x` is a jax tensor; safe to call even when jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert TensorFlow, PyTorch, and JAX tensors and numpy arrays to nested python lists."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert TensorFlow, PyTorch, and JAX tensors and python lists to numpy arrays."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for model outputs as dataclasses: an ordered dict whose values can also
    be accessed as attributes or by integer/slice index.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not ``None``."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the ``padding`` argument in tokenizer methods."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the ``return_tensors`` argument in tokenizer methods."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a given model can return a loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label arguments used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict with delimiter-joined keys."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for numpy, torch, tensorflow and jax tensors."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape` for numpy, torch, tensorflow and jax tensors."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze` for numpy, torch, tensorflow and jax tensors."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims` for numpy, torch, tensorflow and jax tensors."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size` for numpy, torch, tensorflow and jax tensors."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Add the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """Infer the framework of a given model without relying on isinstance()."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
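
# Quick illustrative check of flatten_dict (an addition, not part of the original
# module; shown as a comment because the module uses relative imports):
#
#   flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#   # -> {"a": 1, "b.c": 2, "b.d.e": 3}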
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase: Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = XLNetTokenizer
SCREAMING_SNAKE_CASE_ : Dict = XLNetTokenizerFast
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowercase : Union[str, Any] = {"""input_ids""": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase,  # the expected-encoding dict assigned above
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
"""Convert DPT checkpoints from the original repository into the HuggingFace format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Copy/paste/tweak the original model's weights into our DPT structure."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
__lowerCamelCase = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
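
# Illustrative invocation (an assumption; the script name and local checkpoint path
# are examples only — the checkpoint must be downloaded first, because the script
# calls torch.load on the value passed as --checkpoint_url):
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url dpt_hybrid-midas-501f0c75.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid-midas \
#       --show_prediction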
"""simple docstring"""
from __future__ import annotations
def lowercase (snake_case__ : list , snake_case__ : int , snake_case__ : int , snake_case__ : int ) -> list:
'''simple docstring'''
lowerCAmelCase = []
lowerCAmelCase , lowerCAmelCase = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
lowerCAmelCase = result + left + right
return input_list
def lowercase (snake_case__ : list ) -> list:
'''simple docstring'''
if len(snake_case__ ) <= 1:
return input_list
lowerCAmelCase = list(snake_case__ )
# iteration for two-way merging
lowerCAmelCase = 2
while p <= len(snake_case__ ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(snake_case__ ) , snake_case__ ):
lowerCAmelCase = i
lowerCAmelCase = i + p - 1
lowerCAmelCase = (low + high + 1) // 2
lowerCAmelCase = merge(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# final merge of last two parts
if p * 2 >= len(snake_case__ ):
lowerCAmelCase = i
lowerCAmelCase = merge(snake_case__ , 0 , snake_case__ , len(snake_case__ ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
a = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
a = []
else:
a = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
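
# Quick sanity check (an illustrative addition, not part of the original file):
assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]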
"""simple docstring"""
from __future__ import annotations
def lowercase__(A , A ) ->list[list[int]]:
"""simple docstring"""
lowercase__ : list[list[int]]= []
create_all_state(1 , A , A , [] , A )
return result
def lowercase__(A , A , A , A , A , ) ->None:
"""simple docstring"""
if level == 0:
total_list.append(current_list[:] )
return
for i in range(A , total_number - level + 2 ):
current_list.append(A )
create_all_state(i + 1 , A , level - 1 , A , A )
current_list.pop()
def lowercase__(A ) ->None:
"""simple docstring"""
for i in total_list:
print(*A )
if __name__ == "__main__":
a : str = 4
a : Any = 2
a : Optional[int] = generate_all_combinations(n, k)
print_all_state(total_list)
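
# For n = 4, k = 2 the function yields the six 2-element subsets of {1, 2, 3, 4}
# in lexicographic order (an illustrative check, not part of the original file):
assert generate_all_combinations(4, 2) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]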
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Union[str, Any] = False
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Any= "A painting of a squirrel eating a burger "
lowercase__ : Optional[Any]= torch.manual_seed(0 )
lowercase__ : List[str]= pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= VersatileDiffusionTextToImagePipeline.from_pretrained(snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Any= generator.manual_seed(0 )
lowercase__ : Tuple= pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : List[str]= "A painting of a squirrel eating a burger "
lowercase__ : Union[str, Any]= torch.manual_seed(0 )
lowercase__ : Optional[Any]= pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
lowercase__ : List[str]= image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : Optional[int]= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase_ )
class lowerCamelCase__ ( lowerCamelCase_ ):
a__ : str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
a__ : ClassVar[Features] = Features({"""image""": Image()} )
a__ : ClassVar[Features] = Features({"""labels""": ClassLabel} )
a__ : str = "image"
a__ : str = "labels"
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , SCREAMING_SNAKE_CASE ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
snake_case : Dict = copy.deepcopy(self )
snake_case : Tuple = self.label_schema.copy()
snake_case : Union[str, Any] = features[self.label_column]
snake_case : Dict = label_schema
return task_template
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
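
# Illustrative usage (an addition, not part of the original module; shown as a
# comment because the module uses relative imports): align the template's label
# schema with a dataset's concrete ClassLabel feature.
#
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification(image_column="image", label_column="labels")
#   task = task.align_with_features(features)
#   # task.label_schema["labels"] is now the two-class ClassLabel above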
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
__A = "path-to-your-trained-model"
__A = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
__A = "A photo of sks dog in a bucket"
__A = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = [0] * len(lowercase )
for i in range(1 ,len(lowercase ) ):
# use last results for better performance - dynamic programming
_UpperCAmelCase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_UpperCAmelCase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_UpperCAmelCase = j
return prefix_result
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return max(prefix_function(lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
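
# Illustrative check (an addition, not part of the original file): for "aabcdaabc"
# the prefix function is [0, 1, 0, 0, 0, 1, 2, 3, 4], so the longest border is 4.
assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4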
"""VAN (Visual Attention Network) model configuration (deprecated)."""
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a [`VanModel`].
    """

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
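
# Illustrative usage (an addition, not part of the original module; shown as a
# comment because the module uses relative imports): VanConfig behaves like any
# PretrainedConfig subclass.
#
#   from transformers import VanConfig
#   config = VanConfig(hidden_sizes=[32, 64, 160, 256])
#   assert config.model_type == "van"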
"""Rod cutting: naive recursion, top-down memoization, and bottom-up dynamic programming."""


def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustively try every first cut; exponential time."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized recursion: each subproblem is solved at most once."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative dynamic programming over increasing rod lengths."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
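
# Worked example (an illustrative addition, not part of the original file): with the
# classic CLRS prices [1, 5, 8, 9] and n = 4, the optimum is two length-2 pieces,
# giving 5 + 5 = 10.
assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10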
class CircularQueue:
    """Fixed-size circular FIFO queue backed by a Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self):
        return self.size

    def is_empty(self):
        return self.size == 0

    def first(self):
        """Return the front element without removing it (or False if empty)."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        """Insert data at the rear; raises if the queue is full."""
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        """Remove and return the front element; raises on an empty queue."""
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
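
# Usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b").enqueue("c")  # enqueue returns self, so calls chain
    assert len(queue) == 3 and queue.first() == "a"
    assert queue.dequeue() == "a"
    queue.enqueue("d")  # the slot freed at the front is reused: rear wraps around
    assert queue.dequeue() == "b"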
| 184 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 366 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Export a PyTorch BertModel's weights as a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
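
# Example invocation (illustrative; the flag names come from the parser above, the
# script filename and paths are placeholders):
#
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoint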
| 258 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 276 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Position ids should be masked with the embedding object's padding index."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """Position ids should be masked with the embedding object's padding index."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
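
# Quick illustration of the position-id convention asserted above (padding_idx is 1,
# ESM's pad_token_id in get_config): non-padding tokens count up from padding_idx + 1
# and padding positions keep padding_idx itself, e.g.
#
#   input_ids = [[12, 31, 13, 1]]  ->  position_ids = [[2, 3, 4, 1]]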
| 276 | 1 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 357 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 74 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 309 |
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    """Grey-level remapping based on the cumulative histogram of the input image."""

    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 309 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 356 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args):
    """Factory that builds a ConvertCommand from the parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command with the datasets-cli argument parser."""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
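
# Example invocation (illustrative; "convert" is the subcommand registered above and
# the paths are placeholders):
#
#   datasets-cli convert --tfds_path ./tfds_datasets/my_dataset.py \
#       --datasets_directory ./hf_datasets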
| 93 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of the scheduler's step functions."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Stochastic sampler from Karras et al. (2022): churn extra noise into the sample,
    then take an Euler step (optionally followed by a second-order correction).
    """

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
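
# Schematic sampling loop (illustrative pseudocode, not part of this module; the exact
# model-input scaling conventions of Karras et al. pipelines are omitted and
# `denoiser` is a hypothetical callable):
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   sample = torch.randn(shape) * scheduler.init_noise_sigma
#   for i in range(len(scheduler.schedule)):
#       sigma = scheduler.schedule[i]
#       sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = denoiser(sample_hat, sigma_hat)
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample
| 145 |
'''simple docstring'''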
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 145 | 1 |
"""simple docstring"""
def kth_permutation(k, n):
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
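
# Quick illustration: the permutations of 0..2 in lexicographic order are
# [0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0],
# so kth_permutation(4, 3) == [2, 0, 1].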
| 30 | """simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
UpperCAmelCase__ = """"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
# authorize twitter, initialize tweepy
_UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase )
auth.set_access_token(lowercase ,lowercase )
_UpperCAmelCase = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
_UpperCAmelCase = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
_UpperCAmelCase = api.user_timeline(
screen_name=lowercase ,count=2_00 ,max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
_UpperCAmelCase = alltweets[-1].id - 1
print(f'''...{len(lowercase )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
_UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f:
_UpperCAmelCase = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 30 | 1 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and always restores it."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
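
# Usage sketch (illustrative): wrap a terminal-UI section so the cursor is restored
# even if an exception is raised; `run_interactive_menu` is a hypothetical function.
#
#   with hide():
#       run_interactive_menu()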
| 44 |
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort non-negative integers by bucketing digits from least to most significant."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
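
# Quick illustration: values are bucketed by the 1s digit, then 10s, then 100s.
#
#   >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#   [2, 24, 45, 66, 75, 90, 170, 802]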
| 44 | 1 |
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
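
# Usage sketch (illustrative): subclass with a prefix and defaults, then round-trip a
# hyperparameter dict through the short run name. The exact short name depends on the
# generated abbreviations (here "batch_size" would abbreviate to something like "bs").
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 1e-3, "batch_size": 8}
#
#   name = RunNamer.shortname({"learning_rate": 1e-3, "batch_size": 16})  # e.g. "run_bs16"
#   assert RunNamer.parse_repr(name)["batch_size"] == 16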
| 250 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 250 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 94 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwarded to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwarded to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 94 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
lowercase__ : Tuple = TypeVar("T")
class a__ ( Generic[T] ):
def __init__( self , A ) -> None:
'''simple docstring'''
a = data
a = self
a = 0
class a__ ( Generic[T] ):
def __init__( self ) -> None:
'''simple docstring'''
a = {}
def lowerCAmelCase_ ( self , A ) -> None:
'''simple docstring'''
a = DisjointSetTreeNode(a__ )
def lowerCAmelCase_ ( self , A ) -> DisjointSetTreeNode[T]:
'''simple docstring'''
a = self.map[data]
if elem_ref != elem_ref.parent:
a = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def lowerCAmelCase_ ( self , A , A ) -> None:
'''simple docstring'''
if nodea.rank > nodea.rank:
a = nodea
else:
a = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def lowerCAmelCase_ ( self , A , A ) -> None:
'''simple docstring'''
self.link(self.find_set(a__ ) , self.find_set(a__ ) )
class a__ ( Generic[T] ):
def __init__( self ) -> None:
'''simple docstring'''
a = {}
def lowerCAmelCase_ ( self , A ) -> None:
'''simple docstring'''
if node not in self.connections:
a = {}
def lowerCAmelCase_ ( self , A , A , A ) -> None:
'''simple docstring'''
self.add_node(a__ )
self.add_node(a__ )
a = weight
a = weight
def lowerCAmelCase_ ( self ) -> GraphUndirectedWeighted[T]:
'''simple docstring'''
a = []
a = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
# creating the disjoint set
a = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(a__ )
# MST generation
a = 0
a = 0
a = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
a , a , a = edges[index]
index += 1
a = disjoint_set.find_set(a__ )
a = disjoint_set.find_set(a__ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(a__ , a__ , a__ )
disjoint_set.union(a__ , a__ )
return graph
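# Hedged, self-contained mini-run of the same Kruskal idea as above,
# written on plain dicts so it executes despite the renamed class and
# method names in this corpus. All names here are local to the example.
if __name__ == "__main__":
    example_edges = [(1, 2, 1), (2, 3, 2), (1, 3, 10)]
    parent = {v: v for e in example_edges for v in e[:2]}

    def find(v: int) -> int:
        while parent[v] != v:
            parent[v] = parent[parent[v]]  # path halving
            v = parent[v]
        return v

    mst, total = [], 0
    for u, v, w in sorted(example_edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # joining two components keeps the result acyclic
            parent[root_u] = root_v
            mst.append((u, v, w))
            total += w
    print(mst, total)  # [(1, 2, 1), (2, 3, 2)] 3 -- the heavy (1, 3, 10) edge is dropped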
| 354 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowercase__ : str = TypeVar("T")
class a__ ( Generic[T] ):
def __init__( self , A = True ) -> None:
'''simple docstring'''
a = {} # dictionary of lists
a = directed
def lowerCAmelCase_ ( self , A , A ) -> GraphAdjacencyList[T]:
'''simple docstring'''
if not self.directed: # For undirected graphs
            # if both source vertex and destination vertex are present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(A )
self.adj_list[destination_vertex].append(A )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(A )
a = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(A )
a = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
else:
a = [destination_vertex]
a = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(A )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(A )
a = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
a = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new
# with destination vertex as key, which has no adjacent vertex
else:
a = [destination_vertex]
a = []
return self
def __repr__( self ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
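# Hedged, self-contained sketch of the same add_edge bookkeeping on a
# plain dict, runnable as-is. A directed edge records only source -> dest;
# an undirected edge records both directions.
if __name__ == "__main__":
    adjacency: dict[str, list[str]] = {}

    def add_edge(source: str, dest: str, directed: bool = True) -> None:
        adjacency.setdefault(source, []).append(dest)
        adjacency.setdefault(dest, [])  # ensure dest exists even with no out-edges
        if not directed:
            adjacency[dest].append(source)

    add_edge("a", "b")
    add_edge("a", "c")
    add_edge("b", "c")
    print(adjacency)  # {'a': ['b', 'c'], 'b': ['c'], 'c': []}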
| 180 | 0 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCAmelCase : str = logging.get_logger(__name__)
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "linear"
lowerCAmelCase__ = "cosine"
lowerCAmelCase__ = "cosine_with_restarts"
lowerCAmelCase__ = "polynomial"
lowerCAmelCase__ = "constant"
lowerCAmelCase__ = "constant_with_warmup"
lowerCAmelCase__ = "piecewise_constant"
def a__ ( a__ , a__ = -1 ):
"""simple docstring"""
return LambdaLR(a__ , lambda a__ : 1 , last_epoch=a__ )
def a__ ( a__ , a__ , a__ = -1 ):
"""simple docstring"""
def lr_lambda(a__ ):
if current_step < num_warmup_steps:
return float(a__ ) / float(max(1.0 , a__ ) )
return 1.0
return LambdaLR(a__ , a__ , last_epoch=a__ )
def a__ ( a__ , a__ , a__ = -1 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" )
__SCREAMING_SNAKE_CASE = int(a__ )
__SCREAMING_SNAKE_CASE = float(a__ )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(a__ , a__ ):
def rule_func(a__ ) -> float:
__SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(a__ ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__SCREAMING_SNAKE_CASE = create_rules_function(a__ , a__ )
return LambdaLR(a__ , a__ , last_epoch=a__ )
def a__ ( a__ , a__ , a__ , a__=-1 ):
"""simple docstring"""
def lr_lambda(a__ ):
if current_step < num_warmup_steps:
return float(a__ ) / float(max(1 , a__ ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(a__ , a__ , a__ )
def a__ ( a__ , a__ , a__ , a__ = 0.5 , a__ = -1 ):
"""simple docstring"""
def lr_lambda(a__ ):
if current_step < num_warmup_steps:
return float(a__ ) / float(max(1 , a__ ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(a__ ) * 2.0 * progress )) )
return LambdaLR(a__ , a__ , a__ )
def a__ ( a__ , a__ , a__ , a__ = 1 , a__ = -1 ):
"""simple docstring"""
def lr_lambda(a__ ):
if current_step < num_warmup_steps:
return float(a__ ) / float(max(1 , a__ ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(a__ ) * progress) % 1.0) )) )
return LambdaLR(a__ , a__ , a__ )
def a__ ( a__ , a__ , a__ , a__=1E-7 , a__=1.0 , a__=-1 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
        raise ValueError(F'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
def lr_lambda(a__ ):
if current_step < num_warmup_steps:
return float(a__ ) / float(max(1 , a__ ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE = lr_init - lr_end
__SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(a__ , a__ , a__ )
UpperCAmelCase : Optional[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a__ ( a__ , a__ , a__ = None , a__ = None , a__ = None , a__ = 1 , a__ = 1.0 , a__ = -1 , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = SchedulerType(a__ )
__SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(a__ , last_epoch=a__ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(a__ , step_rules=a__ , last_epoch=a__ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(a__ , num_warmup_steps=a__ , last_epoch=a__ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
a__ , num_warmup_steps=a__ , num_training_steps=a__ , num_cycles=a__ , last_epoch=a__ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
a__ , num_warmup_steps=a__ , num_training_steps=a__ , power=a__ , last_epoch=a__ , )
return schedule_func(
a__ , num_warmup_steps=a__ , num_training_steps=a__ , last_epoch=a__ )
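# Hedged usage sketch, assuming the factory above keeps its original
# public name get_scheduler (the corpus renamed its def statement):
# build a linear warmup/decay schedule around a toy optimizer.
if __name__ == "__main__":
    import torch

    toy_params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.AdamW(toy_params, lr=1e-3)
    lr_scheduler = get_scheduler(
        "linear", optimizer, num_warmup_steps=10, num_training_steps=100
    )
    for _ in range(5):
        optimizer.step()
        lr_scheduler.step()
    print(lr_scheduler.get_last_lr())  # [0.0005] -- halfway through warmup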
| 267 |
| 267 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = tempfile.mkdtemp()
lowercase : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
lowercase : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase : Any = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"""do_convert_rgb""": True,
}
lowercase : List[str] = os.path.join(self.tmpdirname ,snake_case )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
lowercase : str = [Image.fromarray(np.moveaxis(snake_case ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = self.get_tokenizer()
lowercase : Dict = self.get_rust_tokenizer()
lowercase : Union[str, Any] = self.get_image_processor()
lowercase : Optional[Any] = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
processor_slow.save_pretrained(self.tmpdirname )
lowercase : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=snake_case )
lowercase : List[Any] = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
processor_fast.save_pretrained(self.tmpdirname )
lowercase : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,snake_case )
self.assertIsInstance(processor_fast.tokenizer ,snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,snake_case )
self.assertIsInstance(processor_fast.image_processor ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase : int = self.get_tokenizer(cls_token="""(CLS)""" ,sep_token="""(SEP)""" )
lowercase : Dict = self.get_image_processor(do_normalize=snake_case )
lowercase : Optional[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname ,cls_token="""(CLS)""" ,sep_token="""(SEP)""" ,do_normalize=snake_case )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.get_image_processor()
lowercase : List[str] = self.get_tokenizer()
lowercase : Optional[Any] = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : Optional[int] = self.prepare_image_inputs()
lowercase : Tuple = image_processor(snake_case ,return_tensors="""np""" )
lowercase : Optional[int] = processor(images=snake_case ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.get_image_processor()
lowercase : Union[str, Any] = self.get_tokenizer()
lowercase : Any = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : Any = """Alexandra,T-shirt的价格是15便士。"""
lowercase : int = processor(text=snake_case )
lowercase : Dict = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.get_image_processor()
lowercase : Optional[Any] = self.get_tokenizer()
lowercase : Dict = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : List[str] = """Alexandra,T-shirt的价格是15便士。"""
lowercase : Any = self.prepare_image_inputs()
lowercase : Optional[Any] = processor(text=snake_case ,images=snake_case )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = self.get_image_processor()
lowercase : str = self.get_tokenizer()
lowercase : Dict = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase : Any = processor.batch_decode(snake_case )
lowercase : str = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.get_image_processor()
lowercase : Dict = self.get_tokenizer()
lowercase : List[Any] = ChineseCLIPProcessor(tokenizer=snake_case ,image_processor=snake_case )
lowercase : str = """Alexandra,T-shirt的价格是15便士。"""
lowercase : Optional[Any] = self.prepare_image_inputs()
lowercase : Dict = processor(text=snake_case ,images=snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
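# Hedged usage sketch mirroring the calls the tests above exercise,
# outside the unittest harness. The checkpoint name is an assumption
# for illustration.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    from transformers import ChineseCLIPProcessor

    proc = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")  # assumed
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    out = proc(text="一件黑色的T-shirt", images=image, return_tensors="pt")
    print(sorted(out.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids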
| 285 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
# Load configuration defined in the metadata file
with open(SCREAMING_SNAKE_CASE__ ) as metadata_file:
lowercase : Union[str, Any] = json.load(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE__ , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
lowercase : Tuple = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
lowercase : str = load_original_entity_vocab(SCREAMING_SNAKE_CASE__ )
# add an entry for [MASK2]
lowercase : Any = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowercase : Dict = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
lowercase : List[Any] = AddedToken("""<ent>""" , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
lowercase : int = AddedToken("""<ent2>""" , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """tokenizer_config.json""" ) , """r""" ) as f:
lowercase : List[str] = json.load(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = """MLukeTokenizer"""
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : int = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Initialize the embeddings of the special tokens
lowercase : Dict = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
lowercase : Dict = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
lowercase : int = state_dict["""embeddings.word_embeddings.weight"""]
lowercase : Union[str, Any] = word_emb[ent_init_index].unsqueeze(0 )
lowercase : List[str] = word_emb[enta_init_index].unsqueeze(0 )
lowercase : str = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowercase : List[Any] = state_dict[bias_name]
lowercase : Any = decoder_bias[ent_init_index].unsqueeze(0 )
lowercase : Tuple = decoder_bias[enta_init_index].unsqueeze(0 )
lowercase : int = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowercase : Union[str, Any] = f"encoder.layer.{layer_index}.attention.self."
lowercase : List[str] = state_dict[prefix + matrix_name]
lowercase : Any = state_dict[prefix + matrix_name]
lowercase : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase : Any = state_dict["""entity_embeddings.entity_embeddings.weight"""]
lowercase : Tuple = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
lowercase : Optional[Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowercase : Optional[Any] = state_dict["""entity_predictions.bias"""]
lowercase : str = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
lowercase : List[str] = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowercase : List[str] = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
lowercase : List[str] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
lowercase : List[Any] = state_dict[key]
else:
lowercase : Union[str, Any] = state_dict[key]
lowercase , lowercase : int = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
if set(SCREAMING_SNAKE_CASE__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(SCREAMING_SNAKE_CASE__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowercase : str = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , task="""entity_classification""" )
lowercase : str = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
lowercase : str = (0, 9)
lowercase : Dict = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , return_tensors="""pt""" )
lowercase : Any = model(**SCREAMING_SNAKE_CASE__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase : List[Any] = torch.Size((1, 33, 768) )
lowercase : str = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase : Optional[int] = torch.Size((1, 1, 768) )
lowercase : List[Any] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
lowercase : Any = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = """Tokyo is the capital of <mask>."""
lowercase : List[Any] = (24, 30)
lowercase : int = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , return_tensors="""pt""" )
lowercase : Dict = model(**SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = encoding["""input_ids"""][0].tolist()
lowercase : List[Any] = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
lowercase : Dict = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = outputs.entity_logits[0][0].argmax().item()
lowercase : int = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(SCREAMING_SNAKE_CASE__ ) )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Dict:
lowercase : Optional[int] = ["""[MASK]""", """[PAD]""", """[UNK]"""]
lowercase : List[str] = [json.loads(SCREAMING_SNAKE_CASE__ ) for line in open(SCREAMING_SNAKE_CASE__ )]
lowercase : int = {}
for entry in data:
lowercase : Optional[Any] = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
lowercase : Optional[Any] = entity_id
break
lowercase : List[Any] = f"{language}:{entity_name}"
lowercase : Union[str, Any] = entity_id
return new_mapping
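# Hedged sketch of one line of the entity-vocab JSONL that the loader
# above expects (field values are illustrative, not from a real file):
#
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}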
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
lowercase : str = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
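# Hedged example invocation of this conversion script; the script name
# and every path below are placeholders, not real files:
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./converted_mluke \
#       --model_size base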
| 285 | 1 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a_ ( _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ : int = args.pruning_method
lowercase__ : Tuple = args.threshold
lowercase__ : str = args.model_name_or_path.rstrip('/' )
lowercase__ : List[Any] = args.target_model_path
print(f"""Load fine-pruned model from {model_name_or_path}""" )
lowercase__ : Optional[Any] = torch.load(os.path.join(_lowerCAmelCase , 'pytorch_model.bin' ) )
lowercase__ : List[str] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowercase__ : Tuple = tensor
print(f"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
lowercase__ : List[str] = tensor
print(f"""Copied layer {name}""" )
elif "bias" in name:
lowercase__ : Optional[Any] = tensor
print(f"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
lowercase__ : Optional[Any] = MagnitudeBinarizer.apply(inputs=_lowerCAmelCase , threshold=_lowerCAmelCase )
lowercase__ : Optional[int] = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowercase__ : Optional[Any] = name[:-6]
lowercase__ : Optional[int] = model[f"""{prefix_}mask_scores"""]
lowercase__ : Any = TopKBinarizer.apply(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : List[Any] = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowercase__ : Any = name[:-6]
lowercase__ : Optional[Any] = model[f"""{prefix_}mask_scores"""]
lowercase__ : Tuple = ThresholdBinarizer.apply(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase__ : List[str] = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowercase__ : Union[str, Any] = name[:-6]
lowercase__ : Optional[int] = model[f"""{prefix_}mask_scores"""]
lowercase__ , lowercase__ : Tuple = -0.1, 1.1
lowercase__ : Optional[Any] = torch.sigmoid(_lowerCAmelCase )
lowercase__ : Optional[Any] = s * (r - l) + l
lowercase__ : Optional[Any] = s_bar.clamp(min=0.0 , max=1.0 )
lowercase__ : Union[str, Any] = tensor * mask
print(f"""Pruned layer {name}""" )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
lowercase__ : Union[str, Any] = os.path.join(
os.path.dirname(_lowerCAmelCase ) , f"""bertarized_{os.path.basename(_lowerCAmelCase )}""" )
if not os.path.isdir(_lowerCAmelCase ):
shutil.copytree(_lowerCAmelCase , _lowerCAmelCase )
print(f"""\nCreated folder {target_model_path}""" )
torch.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
_UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
_UpperCamelCase : Dict = parser.parse_args()
main(args)
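# Hedged example invocation; the script name and path are placeholders:
#
#   python bertarize.py \
#       --pruning_method sigmoied_threshold \
#       --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model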
| 77 | """simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCAmelCase_ :
def __init__( self , a ) -> List[str]:
if isinstance(a , a ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
lowercase__ : Optional[Any] = deepcopy(a )
elif os.path.exists(a ):
with io.open(a , 'r' , encoding='utf-8' ) as f:
lowercase__ : List[Any] = json.load(a )
else:
try:
lowercase__ : Optional[int] = baseaa.urlsafe_baadecode(a ).decode('utf-8' )
lowercase__ : List[str] = json.loads(a )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
lowercase__ : Any = config
self.set_stage_and_offload()
def _UpperCAmelCase ( self ) -> Dict:
        # zero stage - this is done as early as possible, before the model is created, so that
        # ``is_deepspeed_zero3_enabled`` can be queried and the deepspeed config object reached
        # during ``zero.Init()``, which needs to know the dtype and some other hparams.
lowercase__ : Tuple = self.get_value('zero_optimization.stage' , -1 )
# offload
lowercase__ : int = False
if self.is_zeroa() or self.is_zeroa():
lowercase__ : str = set(['cpu', 'nvme'] )
lowercase__ : Optional[Any] = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowercase__ : Optional[Any] = True
def _UpperCAmelCase ( self , a ) -> Any:
lowercase__ : Dict = self.config
# find the config node of interest if it exists
lowercase__ : int = ds_key_long.split('.' )
lowercase__ : Dict = nodes.pop()
for node in nodes:
lowercase__ : Optional[Any] = config.get(a )
if config is None:
return None, ds_key
return config, ds_key
def _UpperCAmelCase ( self , a , a=None ) -> Union[str, Any]:
lowercase__ , lowercase__ : Tuple = self.find_config_node(a )
if config is None:
return default
return config.get(a , a )
def _UpperCAmelCase ( self , a , a=False ) -> Any:
lowercase__ : str = self.config
# find the config node of interest if it exists
lowercase__ : List[Any] = ds_key_long.split('.' )
for node in nodes:
lowercase__ : str = config
lowercase__ : str = config.get(a )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(a )
def _UpperCAmelCase ( self , a ) -> List[Any]:
lowercase__ : Union[str, Any] = self.get_value(a )
return False if value is None else bool(a )
def _UpperCAmelCase ( self , a ) -> Any:
lowercase__ : Any = self.get_value(a )
return False if value is None else not bool(a )
def _UpperCAmelCase ( self ) -> Tuple:
return self._stage == 2
def _UpperCAmelCase ( self ) -> List[Any]:
return self._stage == 3
def _UpperCAmelCase ( self ) -> str:
return self._offload
class UpperCAmelCase_ :
def __init__( self , a ) -> str:
lowercase__ : Tuple = engine
def _UpperCAmelCase ( self , a , **a ) -> Optional[int]:
# runs backpropagation and handles mixed precision
self.engine.backward(a , **a )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class UpperCAmelCase_ ( _a):
def __init__( self , a ) -> Dict:
super().__init__(a , device_placement=a , scaler=a )
lowercase__ : Union[str, Any] = hasattr(self.optimizer , 'overflow' )
def _UpperCAmelCase ( self , a=None ) -> List[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _UpperCAmelCase ( self ) -> Optional[int]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _UpperCAmelCase ( self ) -> Tuple:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCAmelCase_ ( _a):
def __init__( self , a , a ) -> Any:
super().__init__(a , a )
def _UpperCAmelCase ( self ) -> List[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCAmelCase_ :
def __init__( self , a , a=0.001 , a=0 , **a ) -> Tuple:
lowercase__ : List[Any] = params
lowercase__ : int = lr
lowercase__ : int = weight_decay
lowercase__ : Union[str, Any] = kwargs
class UpperCAmelCase_ :
def __init__( self , a , a=None , a=0 , **a ) -> Tuple:
lowercase__ : Dict = optimizer
lowercase__ : List[str] = total_num_steps
lowercase__ : Optional[int] = warmup_num_steps
lowercase__ : List[Any] = kwargs
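# Hedged, self-contained sketch of the dotted-key lookup the config
# wrapper above performs (its class and method names were renamed by
# the corpus, so this demo re-implements just that lookup locally).
if __name__ == "__main__":
    ds_config = {
        "zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}
    }

    def get_value(config: dict, ds_key_long: str, default=None):
        *nodes, ds_key = ds_key_long.split(".")
        for node in nodes:
            config = config.get(node)
            if config is None:
                return default
        return config.get(ds_key, default)

    print(get_value(ds_config, "zero_optimization.stage"))                 # 3
    print(get_value(ds_config, "zero_optimization.offload_param.device"))  # cpu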
| 77 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int):
lowercase__ : Any = b.T
lowercase__ : Optional[int] = np.sum(np.square(_lowerCamelCase) , axis=1)
lowercase__ : Dict = np.sum(np.square(_lowerCamelCase) , axis=0)
lowercase__ : int = np.matmul(_lowerCamelCase , _lowerCamelCase)
lowercase__ : Union[str, Any] = aa[:, None] - 2 * ab + ba[None, :]
return d
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict):
lowercase__ : Dict = x.reshape(-1 , 3)
lowercase__ : Optional[int] = squared_euclidean_distance(_lowerCamelCase , _lowerCamelCase)
return np.argmin(_lowerCamelCase , axis=1)
class snake_case_ ( __A ):
__A : Tuple = ["pixel_values"]
def __init__( self : Optional[int] , lowercase_ : Optional[Union[List[List[int]], np.ndarray]] = None , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : bool = True , **lowercase_ : Tuple , ) -> None:
super().__init__(**lowercase_ )
lowercase__ : Dict = size if size is not None else {"height": 2_56, "width": 2_56}
lowercase__ : Optional[int] = get_size_dict(lowercase_ )
lowercase__ : List[Any] = np.array(lowercase_ ) if clusters is not None else None
lowercase__ : Tuple = do_resize
lowercase__ : List[str] = size
lowercase__ : List[str] = resample
lowercase__ : Any = do_normalize
lowercase__ : Tuple = do_color_quantize
def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ) -> np.ndarray:
lowercase__ : Optional[Any] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
lowercase_ , size=(size["height"], size["width"]) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Any , lowercase_ : np.ndarray , lowercase_ : Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray:
lowercase__ : Union[str, Any] = rescale(image=lowercase_ , scale=1 / 1_27.5 , data_format=lowercase_ )
lowercase__ : Tuple = image - 1
return image
def __UpperCamelCase ( self : List[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[Union[List[List[int]], np.ndarray]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **lowercase_ : int , ) -> PIL.Image.Image:
lowercase__ : str = do_resize if do_resize is not None else self.do_resize
lowercase__ : int = size if size is not None else self.size
lowercase__ : Dict = get_size_dict(lowercase_ )
lowercase__ : Optional[int] = resample if resample is not None else self.resample
lowercase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ : int = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
lowercase__ : Any = clusters if clusters is not None else self.clusters
lowercase__ : Optional[Any] = np.array(lowercase_ )
lowercase__ : Union[str, Any] = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
lowercase__ : int = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowercase__ : Any = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_normalize:
lowercase__ : List[Any] = [self.normalize(image=lowercase_ ) for image in images]
if do_color_quantize:
lowercase__ : Union[str, Any] = [to_channel_dimension_format(lowercase_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
lowercase__ : List[Any] = np.array(lowercase_ )
lowercase__ : Dict = color_quantize(lowercase_ , lowercase_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
lowercase__ : Union[str, Any] = images.shape[0]
lowercase__ : Tuple = images.reshape(lowercase_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
lowercase__ : str = list(lowercase_ )
else:
lowercase__ : Union[str, Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowercase__ : Optional[Any] = {"input_ids": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
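# Hedged standalone demo of the nearest-palette step behind
# do_color_quantize above: the squared-distance identity
# ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, vectorised over all
# pixel/cluster pairs, then argmin picks the closest palette colour.
# Values are made up.
if __name__ == "__main__":
    palette = np.array([[0.0, 0.0, 0.0], [255.0, 255.0, 255.0]])  # black, white
    pixels = np.array([[10.0, 10.0, 10.0], [240.0, 250.0, 245.0]])
    distances = (
        np.sum(pixels**2, axis=1)[:, None]
        - 2 * pixels @ palette.T
        + np.sum(palette**2, axis=1)[None, :]
    )
    print(np.argmin(distances, axis=1))  # [0 1]: dark pixel -> black, bright -> white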
| 333 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowercase_ ( _lowerCamelCase : List[str]):
return 1 / (1 + np.exp(-z))
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
return (-y * np.log(_lowerCamelCase) - (1 - y) * np.log(1 - h)).mean()
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple):
lowercase__ : Union[str, Any] = np.dot(_lowerCamelCase , _lowerCamelCase)
return np.sum(y * scores - np.log(1 + np.exp(_lowerCamelCase)))
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str=7_0000):
lowercase__ : Optional[int] = np.zeros(x.shape[1])
for iterations in range(_lowerCamelCase):
lowercase__ : Union[str, Any] = np.dot(_lowerCamelCase , _lowerCamelCase)
lowercase__ : Tuple = sigmoid_function(_lowerCamelCase)
lowercase__ : Dict = np.dot(x.T , h - y) / y.size
lowercase__ : int = theta - alpha * gradient # updating the weights
lowercase__ : List[str] = np.dot(_lowerCamelCase , _lowerCamelCase)
lowercase__ : Union[str, Any] = sigmoid_function(_lowerCamelCase)
lowercase__ : Optional[Any] = cost_function(_lowerCamelCase , _lowerCamelCase)
if iterations % 100 == 0:
print(f'''loss: {j} \t''') # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
UpperCamelCase = datasets.load_iris()
UpperCamelCase = iris.data[:, :2]
UpperCamelCase = (iris.target != 0) * 1
UpperCamelCase = 0.1
UpperCamelCase = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def lowercase_ ( _lowerCamelCase : List[Any]):
return sigmoid_function(
np.dot(_lowerCamelCase , _lowerCamelCase)) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((UpperCamelCase) , (UpperCamelCase)) = (x[:, 0].min(), x[:, 0].max())
((UpperCamelCase) , (UpperCamelCase)) = (x[:, 1].min(), x[:, 1].max())
((UpperCamelCase) , (UpperCamelCase)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
UpperCamelCase = np.c_[xxa.ravel(), xxa.ravel()]
UpperCamelCase = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 333 | 1 |
"""simple docstring"""
import argparse
lowerCamelCase_ : int = """docs/source/_static/js/custom.js"""
def _A ( lowercase ):
"""simple docstring"""
with open(lowercase , encoding='''utf-8''' , newline='''\n''' ) as f:
a =f.readlines()
a =0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
a =f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(lowercase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lowercase )
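# Hedged sketch of the custom.js shape this updater assumes (version
# numbers are illustrative):
#
#   const stableVersion = "v4.27.0"
#   const versionMapping = {
#       "main": "main",
#       "v4.27.0": "v4.27.0",
#   }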
if __name__ == "__main__":
lowerCamelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
lowerCamelCase_ : Optional[Any] = parser.parse_args()
    update_custom_js(args.version)
| 81 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : List[Any] ):
_UpperCAmelCase : Union[str, Any] = []
def _A ( self : Any , A : Union[str, Any] , A : Optional[int] , A : List[str] , **A : Tuple ):
self.events.append("on_init_end" )
def _A ( self : Any , A : str , A : List[Any] , A : List[Any] , **A : Tuple ):
self.events.append("on_train_begin" )
def _A ( self : Tuple , A : List[str] , A : Tuple , A : int , **A : List[str] ):
self.events.append("on_train_end" )
def _A ( self : Optional[Any] , A : Dict , A : Any , A : Optional[Any] , **A : List[Any] ):
self.events.append("on_epoch_begin" )
def _A ( self : Optional[Any] , A : List[Any] , A : List[str] , A : Optional[int] , **A : Optional[int] ):
self.events.append("on_epoch_end" )
def _A ( self : List[str] , A : Optional[int] , A : List[Any] , A : Union[str, Any] , **A : Any ):
self.events.append("on_step_begin" )
def _A ( self : Tuple , A : Union[str, Any] , A : int , A : Optional[int] , **A : int ):
self.events.append("on_step_end" )
def _A ( self : Optional[int] , A : Optional[Any] , A : Union[str, Any] , A : str , **A : Union[str, Any] ):
self.events.append("on_evaluate" )
def _A ( self : Optional[Any] , A : Optional[int] , A : Dict , A : List[Any] , **A : Dict ):
self.events.append("on_predict" )
def _A ( self : Dict , A : Dict , A : List[Any] , A : Dict , **A : str ):
self.events.append("on_save" )
def _A ( self : Tuple , A : Optional[Any] , A : Union[str, Any] , A : Optional[int] , **A : Dict ):
self.events.append("on_log" )
def _A ( self : Optional[int] , A : Optional[Any] , A : Tuple , A : Tuple , **A : List[str] ):
self.events.append("on_prediction_step" )
@require_torch
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[int] ):
_UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
def _A ( self : List[Any] ):
shutil.rmtree(self.output_dir )
def _A ( self : Union[str, Any] , A : Optional[int]=0 , A : Optional[Any]=0 , A : Optional[Any]=64 , A : Dict=64 , A : Any=None , A : Tuple=False , **A : Optional[int] ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
_UpperCAmelCase : str = RegressionDataset(length=A )
_UpperCAmelCase : Union[str, Any] = RegressionDataset(length=A )
_UpperCAmelCase : Any = RegressionModelConfig(a=A , b=A )
_UpperCAmelCase : List[Any] = RegressionPreTrainedModel(A )
_UpperCAmelCase : Dict = TrainingArguments(self.output_dir , disable_tqdm=A , report_to=[] , **A )
return Trainer(
A , A , train_dataset=A , eval_dataset=A , callbacks=A , )
def _A ( self : str , A : List[str] , A : List[str] ):
self.assertEqual(len(A ) , len(A ) )
# Order doesn't matter
        _UpperCAmelCase : Tuple = sorted(A , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        _UpperCAmelCase : Any = sorted(A , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
for cba, cba in zip(A , A ):
if isinstance(A , A ) and isinstance(A , A ):
self.assertEqual(A , A )
elif isinstance(A , A ) and not isinstance(A , A ):
self.assertEqual(A , cba.__class__ )
elif not isinstance(A , A ) and isinstance(A , A ):
self.assertEqual(cba.__class__ , A )
else:
self.assertEqual(A , A )
def _A ( self : int , A : List[str] ):
_UpperCAmelCase : List[str] = ["on_init_end", "on_train_begin"]
_UpperCAmelCase : str = 0
_UpperCAmelCase : Optional[Any] = len(trainer.get_eval_dataloader() )
_UpperCAmelCase : Optional[int] = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(A ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _A ( self : str ):
_UpperCAmelCase : Any = self.get_trainer()
_UpperCAmelCase : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# Callbacks passed at init are added to the default callbacks
_UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
_UpperCAmelCase : List[Any] = self.get_trainer(disable_tqdm=A )
_UpperCAmelCase : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
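# A minimal, hedged sketch of an event-recording callback like the
# `MyTestTrainerCallback` exercised above (that class is defined elsewhere in
# the test module); the class name and the subset of hooks shown here are
# illustrative assumptions, not the test suite's exact implementation.
from transformers import TrainerCallback


class EventRecorderCallback(TrainerCallback):
    # Records the name of every event the Trainer fires, in order.
    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")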
| 31 | 0 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n    >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n    >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n    >>> print(results)\n    {'spearmanr': -0.7}\n\n    Example 2:\n    >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n    >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n    ...                                    predictions=[10, 9, 2.5, 6, 4],\n    ...                                    return_pvalue=True)\n    >>> print(results['spearmanr'])\n    -0.7\n    >>> print(round(results['spearmanr_pvalue'], 2))\n    0.19\n"
_CITATION = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 134 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
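# A hedged usage sketch for the decorator above: applied to a method of a
# torch.nn.Module so an attached accelerate hook (if any) runs before the
# call. `TinyModule` and `encode` are illustrative names, not diffusers API.
import torch


class TinyModule(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(4, 4)

    @apply_forward_hook
    def encode(self, x):
        return self.proj(x)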
| 257 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
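# A hedged sketch of reading back the pickle produced by the script above;
# the file name just combines the script's default arguments and is an
# assumption, not a path from the source.
import pickle

with open("data/dump.bert-base-uncased.pickle", "rb") as f:
    sequences = pickle.load(f)  # list of np.uint16 / np.int32 arrays of token ids
print(len(sequences), sequences[0][:10])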
| 65 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 325 |
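# A hedged, standalone sketch mirroring the integration check above; the
# checkpoint name comes from the test, everything else is illustrative.
import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
print(logits.shape)  # (1, 6, vocab_size) with vocab_size == 50000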
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 325 | 1 |
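# A hedged illustration of the comment convention the checker above parses;
# the object path and the replacement pattern below are made up for the
# example and do not refer to real diffusers classes.
#
# # Copied from diffusers.models.some_module.SomeBlock with SomeBlock->MyBlock
# class MyBlock(nn.Module):
#     ...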
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="image-classification", use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, task="image-classification", )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
__lowerCAmelCase : List[Any] = image_processor.size['shortest_edge']
else:
__lowerCAmelCase : Optional[int] = (image_processor.size['height'], image_processor.size['width'])
__lowerCAmelCase : List[Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
__lowerCAmelCase : Tuple = Compose(
[
RandomResizedCrop(_UpperCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
__lowerCAmelCase : Union[str, Any] = Compose(
[
Resize(_UpperCamelCase ),
CenterCrop(_UpperCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(_UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(_UpperCamelCase ):
__lowerCAmelCase : List[str] = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    main()
| 86 |
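# A hedged example invocation of the training script above; the dataset and
# hyperparameters are illustrative, not taken from the source.
#
# python run_image_classification.py \
#     --dataset_name beans \
#     --output_dir ./vit-beans \
#     --do_train --do_eval \
#     --learning_rate 2e-5 \
#     --num_train_epochs 3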
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_input_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )

        return (config, input_ids, attention_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 86 | 1 |
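# A hedged generation sketch mirroring the slow batch-generation test above;
# the checkpoint and prompts come from the test, the settings are illustrative.
from transformers import FlaxGPTJForCausalLM, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True)
sequences = model.generate(
    inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))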
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class a_ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = (TimmBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Optional[int] = TimmBackboneModelTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
def __lowerCAmelCase ( self ) ->Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = 'resnet18'
SCREAMING_SNAKE_CASE : Dict = 'microsoft/resnet-18'
SCREAMING_SNAKE_CASE : Optional[int] = AutoBackbone.from_pretrained(_a , use_timm_backbone=_a )
SCREAMING_SNAKE_CASE : Optional[int] = AutoBackbone.from_pretrained(_a )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
SCREAMING_SNAKE_CASE : Tuple = AutoBackbone.from_pretrained(_a , use_timm_backbone=_a , out_indices=[1, 2, 3] )
SCREAMING_SNAKE_CASE : List[Any] = AutoBackbone.from_pretrained(_a , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def __lowerCAmelCase ( self ) ->str:
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def __lowerCAmelCase ( self ) ->Dict:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self ) ->Optional[Any]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self ) ->List[str]:
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def __lowerCAmelCase ( self ) ->Any:
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def __lowerCAmelCase ( self ) ->List[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self ) ->Optional[int]:
pass
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(_a )
SCREAMING_SNAKE_CASE : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _a )
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Optional[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
SCREAMING_SNAKE_CASE : Any = self.all_model_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_a )
model.to(_a )
SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(_a , _a )
SCREAMING_SNAKE_CASE : Any = model(**_a )
SCREAMING_SNAKE_CASE : Optional[int] = outputs[0][-1]
# Encoder-/Decoder-only models
SCREAMING_SNAKE_CASE : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
SCREAMING_SNAKE_CASE : Tuple = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(**_a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(_a )
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : List[Any] = model_class(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(**_a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(_a )
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : int = model_class(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE : str = model(**_a )
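# Hedged illustration of the out_indices convention asserted above: timm
# backbones default to (-1,) while transformers backbones use
# [len(stage_names) - 1]; both resolve to the last stage. The stage names
# below are illustrative only.
_stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]
assert _stage_names[-1] == _stage_names[len(_stage_names) - 1]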
| 355 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=a__ )
SCREAMING_SNAKE_CASE : int = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=a__ )
env_command_parser(subparsers=a__ )
launch_command_parser(subparsers=a__ )
tpu_command_parser(subparsers=a__ )
test_command_parser(subparsers=a__ )
# Let's go
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
if not hasattr(a__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(a__ )
if __name__ == "__main__":
main()
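# Hedged sketch of the subparser dispatch pattern used in main() above; the
# "hello" command and its handler are hypothetical, not part of accelerate.
def hello_command(args):
    print(f"hello, {args.name}")

def hello_command_parser(subparsers):
    hello = subparsers.add_parser("hello")
    hello.add_argument("--name", default="world")
    # set_defaults(func=...) is what lets `args.func(args)` dispatch in main()
    hello.set_defaults(func=hello_command)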
| 19 | 0 |
"""simple docstring"""
class __snake_case :
def __init__( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : dict[str, TrieNode] = {} # Mapping from char to TrieNode
_lowerCamelCase : Tuple = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : list[str] ):
"""simple docstring"""
for word in words:
self.insert(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self
for char in word:
if char not in curr.nodes:
_lowerCamelCase : List[str] = TrieNode()
_lowerCamelCase : Union[str, Any] = curr.nodes[char]
_lowerCamelCase : Any = True
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self
for char in word:
if char not in curr.nodes:
return False
_lowerCamelCase : Union[str, Any] = curr.nodes[char]
return curr.is_leaf
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : str ):
"""simple docstring"""
def _delete(__lowerCAmelCase : TrieNode , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> bool:
if index == len(__lowerCAmelCase ):
# If word does not exist
if not curr.is_leaf:
return False
_lowerCamelCase : List[Any] = False
return len(curr.nodes ) == 0
_lowerCamelCase : List[Any] = word[index]
_lowerCamelCase : Optional[Any] = curr.nodes.get(__lowerCAmelCase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_lowerCamelCase : Union[str, Any] = _delete(__lowerCAmelCase , __lowerCAmelCase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , __lowerCAmelCase , 0 )
def snake_case_ ( A_ : TrieNode, A_ : str ):
'''simple docstring'''
if node.is_leaf:
print(A_, end=''' ''' )
for key, value in node.nodes.items():
print_words(A_, word + key )
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = '''banana bananas bandana band apple all beast'''.split()
_lowerCamelCase : List[str] = TrieNode()
root.insert_many(A_ )
# print_words(root, "")
assert all(root.find(A_ ) for word in words )
assert root.find('''banana''' )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
assert root.find('''apple''' )
assert root.find('''all''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def snake_case_ ( A_ : str, A_ : bool ):
'''simple docstring'''
print(str(A_ ), '''works!''' if passes else '''doesn\'t work :(''' )
def snake_case_ ( ):
'''simple docstring'''
assert test_trie()
def snake_case_ ( ):
'''simple docstring'''
print_results('''Testing trie functionality''', test_trie() )
if __name__ == "__main__":
main()
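# A minimal standalone sketch of the trie semantics exercised above, assuming
# plain attribute and function names: deleting "banana" would keep "bananas"
# because only the is_leaf flag on the shared prefix node is cleared.
class _Node:
    def __init__(self):
        self.nodes = {}
        self.is_leaf = False

def _insert(root, word):
    curr = root
    for ch in word:
        curr = curr.nodes.setdefault(ch, _Node())
    curr.is_leaf = True

def _find(root, word):
    curr = root
    for ch in word:
        if ch not in curr.nodes:
            return False
        curr = curr.nodes[ch]
    return curr.is_leaf

_root = _Node()
_insert(_root, "banana")
_insert(_root, "bananas")
assert _find(_root, "banana") and _find(_root, "bananas")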
| 72 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__UpperCamelCase : Optional[Any] = '''scheduler_config.json'''
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = 1
lowercase__ = 2
lowercase__ = 3
lowercase__ = 4
lowercase__ = 5
@dataclass
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = 42
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase__ = SCHEDULER_CONFIG_NAME
lowercase__ = ["dtype"]
lowercase__ = []
lowercase__ = True
@classmethod
def __lowerCAmelCase ( cls : List[Any] ,lowercase_ : Dict[str, Any] = None ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[int]=False ,**lowercase_ : Any ,):
lowerCAmelCase__ ,lowerCAmelCase__ : Dict = cls.load_config(
pretrained_model_name_or_path=lowercase_ ,subfolder=lowercase_ ,return_unused_kwargs=lowercase_ ,**lowercase_ ,)
lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = cls.from_config(lowercase_ ,return_unused_kwargs=lowercase_ ,**lowercase_ )
if hasattr(lowercase_ ,'''create_state''' ) and getattr(lowercase_ ,'''has_state''' ,lowercase_ ):
lowerCAmelCase__ : List[Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __lowerCAmelCase ( self : Tuple ,lowercase_ : Union[str, os.PathLike] ,lowercase_ : bool = False ,**lowercase_ : str ):
self.save_config(save_directory=lowercase_ ,push_to_hub=lowercase_ ,**lowercase_ )
@property
def __lowerCAmelCase ( self : List[str] ):
return self._get_compatibles()
@classmethod
def __lowerCAmelCase ( cls : List[Any] ):
lowerCAmelCase__ : Tuple = list(set([cls.__name__] + cls._compatibles ) )
lowerCAmelCase__ : Tuple = importlib.import_module(__name__.split('''.''' )[0] )
lowerCAmelCase__ : Union[str, Any] = [
getattr(lowercase_ ,lowercase_ ) for c in compatible_classes_str if hasattr(lowercase_ ,lowercase_ )
]
return compatible_classes
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
assert len(A_ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(A_ ) - x.ndim) ) , A_ )
def __SCREAMING_SNAKE_CASE ( A_ , A_=0.999 , A_=jnp.floataa ):
def alpha_bar(A_ ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowerCAmelCase__ : Optional[Any] = []
for i in range(A_ ):
lowerCAmelCase__ : str = i / num_diffusion_timesteps
lowerCAmelCase__ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(A_ ) / alpha_bar(A_ ) , A_ ) )
return jnp.array(A_ , dtype=A_ )
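# Hedged sanity sketch of the squaredcos_cap_v2 branch above: each beta is
# 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta = 0.999.
import math as _math

def _alpha_bar(t):
    return _math.cos((t + 0.008) / 1.008 * _math.pi / 2) ** 2

_betas = [min(1 - _alpha_bar((i + 1) / 10) / _alpha_bar(i / 10), 0.999) for i in range(10)]
assert all(0.0 < b <= 0.999 for b in _betas)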
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] ,lowercase_ : List[Any] ):
lowerCAmelCase__ : Optional[int] = scheduler.config
if config.trained_betas is not None:
lowerCAmelCase__ : Any = jnp.asarray(config.trained_betas ,dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowerCAmelCase__ : Union[str, Any] = jnp.linspace(config.beta_start ,config.beta_end ,config.num_train_timesteps ,dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase__ : int = (
jnp.linspace(
config.beta_start**0.5 ,config.beta_end**0.5 ,config.num_train_timesteps ,dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase__ : List[Any] = betas_for_alpha_bar(config.num_train_timesteps ,dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
lowerCAmelCase__ : str = 1.0 - betas
lowerCAmelCase__ : Union[str, Any] = jnp.cumprod(lowercase_ ,axis=0 )
return cls(
alphas=lowercase_ ,betas=lowercase_ ,alphas_cumprod=lowercase_ ,)
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
lowerCAmelCase__ : Any = state.alphas_cumprod
lowerCAmelCase__ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase__ : Tuple = sqrt_alpha_prod.flatten()
lowerCAmelCase__ : str = broadcast_to_shape_from_left(A_ , original_samples.shape )
lowerCAmelCase__ : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase__ : Optional[Any] = sqrt_one_minus_alpha_prod.flatten()
lowerCAmelCase__ : Optional[int] = broadcast_to_shape_from_left(A_ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = get_sqrt_alpha_prod(A_ , A_ , A_ , A_ )
lowerCAmelCase__ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = get_sqrt_alpha_prod(A_ , A_ , A_ , A_ )
lowerCAmelCase__ : Union[str, Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
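# Plain numpy sketch of the add_noise formula implemented above:
# noisy = sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * noise.
import numpy as np

_alphas_cumprod = np.cumprod(1.0 - np.linspace(1e-4, 2e-2, 1000))
_xa = np.random.randn(4, 8)
_noise = np.random.randn(4, 8)
_t = 500
_noisy = np.sqrt(_alphas_cumprod[_t]) * _xa + np.sqrt(1 - _alphas_cumprod[_t]) * _noise
assert _noisy.shape == _xa.shape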
| 106 | 0 |
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_02_17_66_34E-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.35_58_18,
}
def energy_conversion( value : float , from_type : str , to_type : str )-> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            F"Valid values are: {', '.join(ENERGY_CONVERSION )}"
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
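    # Worked example with the table above, using the fixed (value, from_type,
    # to_type) signature: 1 kilowatthour is 3_600_000.0 joules.
    assert energy_conversion(1, "kilowatthour", "joule") == 3_600_000.0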
| 183 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 183 | 1 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _A ( __A ):
_SCREAMING_SNAKE_CASE : List[str] = "Speech2TextFeatureExtractor"
_SCREAMING_SNAKE_CASE : Optional[Any] = "Speech2TextTokenizer"
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = self.feature_extractor
__UpperCAmelCase : str = False
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
__UpperCAmelCase : int = kwargs.pop("""raw_speech""" )
else:
__UpperCAmelCase : Any = kwargs.pop("""audio""" , __UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = kwargs.pop("""sampling_rate""" , __UpperCAmelCase )
__UpperCAmelCase : int = kwargs.pop("""text""" , __UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
__UpperCAmelCase : Optional[Any] = args[0]
__UpperCAmelCase : str = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
__UpperCAmelCase : int = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
__UpperCAmelCase : Dict = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__UpperCAmelCase : Union[str, Any] = encodings['input_ids']
return inputs
def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@contextmanager
def __A ( self ) -> Dict:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
__UpperCAmelCase : int = True
__UpperCAmelCase : Union[str, Any] = self.tokenizer
yield
__UpperCAmelCase : str = self.feature_extractor
__UpperCAmelCase : List[str] = False
| 254 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_UpperCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def UpperCamelCase ( __lowercase : str ,__lowercase : str ):
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
A_ : Any = XLMProphetNetForConditionalGenerationOld.from_pretrained(__lowercase )
A_ , A_ : List[str] = XLMProphetNetForConditionalGeneration.from_pretrained(
__lowercase ,output_loading_info=__lowercase )
else:
A_ : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(__lowercase )
A_ , A_ : str = ProphetNetForConditionalGeneration.from_pretrained(
__lowercase ,output_loading_info=__lowercase )
A_ : Any = ['key_proj', 'value_proj', 'query_proj']
A_ : str = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
A_ : Optional[Any] = key.split('.' )
if attributes[0] == "lm_head":
A_ : int = prophet
A_ : int = prophet_old
else:
A_ : Tuple = prophet.prophetnet
A_ : Optional[Any] = prophet_old.model
A_ : Optional[int] = False
for attribute in attributes:
if attribute in mapping:
A_ : Dict = mapping[attribute]
if not hasattr(__lowercase ,__lowercase ) and len(__lowercase ) > 0:
A_ : Union[str, Any] = attribute
elif hasattr(__lowercase ,__lowercase ):
A_ : Optional[int] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
A_ : List[Any] = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
A_ : Dict = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
A_ : Optional[int] = old_model.bias
logger.info(f'''{attribute} is initialized''' )
A_ : List[str] = True
break
elif attribute in special_keys and hasattr(__lowercase ,'in_proj_weight' ):
A_ : Union[str, Any] = old_model.in_proj_weight.shape[0] // 3
A_ : Optional[int] = getattr(__lowercase ,__lowercase )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
A_ : Tuple = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
A_ : Any = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
A_ : Tuple = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
A_ : Tuple = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
A_ : Dict = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
A_ : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
A_ : Union[str, Any] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
A_ : Any = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
A_ : Union[str, Any] = True
break
if attribute.isdigit():
A_ : str = model[int(__lowercase )]
A_ : List[str] = old_model[int(__lowercase )]
else:
A_ : int = getattr(__lowercase ,__lowercase )
if old_attribute == "":
A_ : List[str] = old_model
else:
if not hasattr(__lowercase ,__lowercase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
A_ : Union[str, Any] = getattr(__lowercase ,__lowercase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(__lowercase )
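# Hedged sketch of the fused-projection split above: in_proj_weight stacks the
# query/key/value projections along dim 0, so slicing into thirds recovers the
# separate q/k/v weights. Sizes here are illustrative only.
import torch

_embed_dim = 4
_in_proj_weight = torch.randn(3 * _embed_dim, _embed_dim)
_q = _in_proj_weight[:_embed_dim, :]
_k = _in_proj_weight[_embed_dim : 2 * _embed_dim, :]
_v = _in_proj_weight[2 * _embed_dim :, :]
assert _q.shape == _k.shape == _v.shape == (_embed_dim, _embed_dim)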
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 140 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : Union[str, Any] = logging.get_logger(__name__)
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=False ):
"""simple docstring"""
a :Any = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a :List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def __lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
a :List[Any] = ''''''
else:
a :Optional[Any] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a :Dict = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
a :List[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
a :Optional[int] = in_proj_weight[
: config.hidden_size, :
]
a :Dict = in_proj_bias[: config.hidden_size]
a :Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a :str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a :Tuple = in_proj_weight[
-config.hidden_size :, :
]
a :Any = in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( UpperCAmelCase_ : Tuple ):
"""simple docstring"""
a :List[Any] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] ):
"""simple docstring"""
a :str = dct.pop(UpperCAmelCase_ )
a :Any = val
def __lowerCamelCase ( ):
"""simple docstring"""
a :List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a :Any = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=False ):
"""simple docstring"""
a :Any = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=UpperCAmelCase_ , )
a :str = ViTHybridConfig(backbone_config=UpperCAmelCase_ , image_size=384 , num_labels=1000 )
a :Tuple = False
# load original model from timm
a :List[Any] = timm.create_model(UpperCAmelCase_ , pretrained=UpperCAmelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
a :Union[str, Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(UpperCAmelCase_ )
a :int = create_rename_keys(UpperCAmelCase_ , UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
a :Tuple = '''huggingface/label-files'''
a :Optional[Any] = '''imagenet-1k-id2label.json'''
a :List[str] = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='''dataset''' ) , '''r''' ) )
a :List[str] = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
a :Dict = idalabel
a :List[Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
a :List[Any] = ViTHybridModel(UpperCAmelCase_ ).eval()
else:
a :Optional[int] = ViTHybridForImageClassification(UpperCAmelCase_ ).eval()
model.load_state_dict(UpperCAmelCase_ )
# create image processor
a :List[Any] = create_transform(**resolve_data_config({} , model=UpperCAmelCase_ ) )
a :Union[str, Any] = transform.transforms
a :int = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
a :List[str] = ViTHybridImageProcessor(
do_resize=UpperCAmelCase_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=UpperCAmelCase_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=UpperCAmelCase_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
a :Union[str, Any] = prepare_img()
    timm_pixel_values = transform(UpperCAmelCase_ ).unsqueeze(0 )
    pixel_values = processor(UpperCAmelCase_ , return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
# verify logits
with torch.no_grad():
a :Dict = model(UpperCAmelCase_ )
a :List[str] = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
        timm_pooled_output = timm_model.forward_features(UpperCAmelCase_ )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(UpperCAmelCase_ )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase_ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
print(F'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(F'''ybelkada/{vit_name}''' )
processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
snake_case : str = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
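# Minimal sketch of the rename-and-pop pattern in create_rename_keys /
# rename_key above; the two state-dict keys here are illustrative only.
_sd = {"patch_embed.proj.weight": torch.zeros(2, 2), "head.bias": torch.zeros(2)}
for _src, _dest in [("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight")]:
    _sd[_dest] = _sd.pop(_src)
assert "patch_embed.proj.weight" not in _sd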
| 370 |
import math
def fx( x : float , a : float ):
    """simple docstring"""
    return math.pow(x , 2 ) - a
def fx_derivative( x : float ):
    """simple docstring"""
    return 2 * x
def get_initial_point( a : float ):
    """simple docstring"""
    start : float = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def square_root_iterative( a : float , max_iter : int = 9999 , tolerance : float = 0.00000000000001 ):
    """simple docstring"""
    if a < 0:
        raise ValueError('''math domain error''' )
    value : float = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
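    # Worked example with the fixed names above: Newton's update
    # x_{n+1} = x_n - (x_n ** 2 - a) / (2 * x_n) converges to sqrt(a).
    print(square_root_iterative(2))   # ~1.4142135623730951
    print(square_root_iterative(25))  # ~5.0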
| 281 | 0 |
from scipy.stats import pearsonr
import datasets
__lowerCAmelCase : int = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
__lowerCAmelCase : Any = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
__lowerCAmelCase : Tuple = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info ( self : Dict ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute ( self : Union[str, Any] , predictions : Optional[Any] , references : Optional[Any] , return_pvalue : bool = False ) -> List[Any]:
        """simple docstring"""
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 88 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("""covid_data""", """cases deaths recovered""")
def covid_stats( url = "https://www.worldometers.info/coronavirus/" ):
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 327 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : List[str] = {}
if train_file is not None:
lowerCamelCase__ : str = [train_file]
if eval_file is not None:
lowerCamelCase__ : List[Any] = [eval_file]
if test_file is not None:
lowerCamelCase__ : int = [test_file]
lowerCamelCase__ : str = datasets.load_dataset('''csv''' , data_files=UpperCAmelCase )
lowerCamelCase__ : int = list(ds[list(files.keys() )[0]].features.keys() )
lowerCamelCase__ : Union[str, Any] = features_name.pop(UpperCAmelCase )
lowerCamelCase__ : int = list(set(ds[list(files.keys() )[0]][label_name] ) )
lowerCamelCase__ : int = {label: i for i, label in enumerate(UpperCAmelCase )}
lowerCamelCase__ : Any = tokenizer.model_input_names
lowerCamelCase__ : List[Any] = {}
if len(UpperCAmelCase ) == 1:
for k in files.keys():
lowerCamelCase__ : Optional[int] = ds[k].map(
lambda UpperCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' ) , batched=UpperCAmelCase , )
elif len(UpperCAmelCase ) == 2:
for k in files.keys():
lowerCamelCase__ : int = ds[k].map(
lambda UpperCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , ) , batched=UpperCAmelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
lowerCamelCase__ : Optional[Any] = {k: v for k, v in ex.items() if k in input_names}
lowerCamelCase__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
lowerCamelCase__ : int = {k: v for k, v in ex.items() if k in input_names}
lowerCamelCase__ : List[str] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
lowerCamelCase__ : Optional[Any] = {k: v for k, v in ex.items() if k in input_names}
lowerCamelCase__ : Any = labelaid[ex[label_name]]
yield (d, label)
lowerCamelCase__ : Dict = (
tf.data.Dataset.from_generator(
UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
lowerCamelCase__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
lowerCamelCase__ : str = (
tf.data.Dataset.from_generator(
UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
lowerCamelCase__ : Tuple = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
lowerCamelCase__ : str = (
tf.data.Dataset.from_generator(
UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
lowerCamelCase__ : List[Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
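# Minimal sketch of the from_generator pattern used in get_tfds above; the toy
# generator and feature name are illustrative only.
import tensorflow as tf

def _toy_gen():
    yield {"input_ids": [1, 2, 3]}, 0
    yield {"input_ids": [4, 5, 6]}, 1

_toy_ds = tf.data.Dataset.from_generator(
    _toy_gen,
    ({"input_ids": tf.int64}, tf.int64),
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
)
_toy_ds = _toy_ds.apply(tf.data.experimental.assert_cardinality(2))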
_A : Dict = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
_UpperCAmelCase : int = field(metadata={"help": "Which column contains the label"} )
_UpperCAmelCase : str = field(default=lowerCAmelCase_ ,metadata={"help": "The path of the training file"} )
_UpperCAmelCase : Optional[str] = field(default=lowerCAmelCase_ ,metadata={"help": "The path of the development file"} )
_UpperCAmelCase : Optional[str] = field(default=lowerCAmelCase_ ,metadata={"help": "The path of the test file"} )
_UpperCAmelCase : int = field(
default=1_2_8 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
_UpperCAmelCase : bool = field(
default=lowerCAmelCase_ ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class __SCREAMING_SNAKE_CASE :
_UpperCAmelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCAmelCase : bool = field(default=lowerCAmelCase_ ,metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_UpperCAmelCase : Optional[str] = field(
default=lowerCAmelCase_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
def _a ( ) -> str:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase__ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
f"16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase__ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
lowerCamelCase__ : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCAmelCase ) , labelaid=UpperCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
lowerCamelCase__ : Any = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCAmelCase ) -> Dict:
lowerCamelCase__ : Any = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
lowerCamelCase__ : List[str] = TFTrainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCamelCase__ : Optional[int] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase__ : Tuple = trainer.evaluate()
lowerCamelCase__ : Optional[int] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
results.update(UpperCAmelCase )
return results
if __name__ == "__main__":
main()
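# Example invocation; the script file name is illustrative, while the flag
# names come from the dataclass fields parsed by HfArgumentParser above:
#   python run_tf_text_classification.py \
#     --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#     --model_name_or_path bert-base-cased --output_dir ./output \
#     --do_train --do_eval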
| 265 |
def actual_power( a : int , b : int ) -> int:
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power( a : int , b : int ) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
    print(power(-2, -3))
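    # Worked check: power(-2, -3) = 1 / (-2) ** 3 = -0.125.
    assert power(-2, -3) == -0.125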
| 265 | 1 |
'''simple docstring'''
def and_gate( input_a: int, input_b: int ):
    return int((input_a, input_b).count(0 ) == 0 )
def test_and_gate( ):
assert and_gate(0, 0 ) == 0
assert and_gate(0, 1 ) == 0
assert and_gate(1, 0 ) == 0
assert and_gate(1, 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 145 |
'''simple docstring'''
from __future__ import annotations
__a = list[tuple[int, int]]
__a = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__a = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A__ :
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : float , lowerCAmelCase__ : Node | None , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : List[str] = pos_x
_UpperCAmelCase : List[Any] = pos_y
_UpperCAmelCase : Optional[int] = (pos_y, pos_x)
_UpperCAmelCase : Tuple = goal_x
_UpperCAmelCase : List[str] = goal_y
_UpperCAmelCase : str = g_cost
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : str = self.calculate_heuristic()
def _lowerCAmelCase ( self : str ) -> float:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = abs(self.pos_x - self.goal_x )
_UpperCAmelCase : Optional[Any] = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self : Any , lowerCAmelCase__ : Optional[int] ) -> bool:
"""simple docstring"""
return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
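# Because f_cost is the heuristic alone (g_cost is never added in), this is
# greedy best-first search; using g_cost + heuristic for f_cost would turn
# the same loop into A*.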
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('------')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem) | 145 | 1 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/esm2_t6_8M_UR50D''': 10_24,
'''facebook/esm2_t12_35M_UR50D''': 10_24,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, 'r') as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
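# The vocab file is expected to hold one token per line; for ESM-2 the first
# entries are special tokens followed by amino-acid symbols, roughly
# (illustrative, not the verbatim file):
#
#     <cls>
#     <pad>
#     <eos>
#     <unk>
#     L
#     A
#     ...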
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
        with open(vocab_file, 'w') as f:
            f.write('\n'.join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        # ESM adds all vocabulary tokens as special tokens (assumption based on
        # the upstream tokenizer; the obfuscated source left this flag ambiguous)
        return super()._add_tokens(new_tokens, special_tokens=True)
| 358 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 109 | 0 |
"""simple docstring"""
def solution() -> int:
    """Count how many Sundays fell on the first of the month during the 20th century."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1_901
    sundays = 0
    while year < 2_001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2_001 and day == 1:
            sundays += 1
    return sundays
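# Sanity check: the well-known answer to Project Euler problem 19 is 171,
# so solution() should return 171.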
if __name__ == "__main__":
print(solution())
| 155 |
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in the sentence that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 22 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file under src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify) | 363 | """simple docstring"""
def solution(n: int = 100) -> int:
    """Count the distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
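# Sanity check: the well-known answer to Project Euler problem 29 is 9183,
# so solution(100) should return 9183.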
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip()))) | 64 | 0 |
def _print_dist(dist, v):
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('inf'):
                print(int(dist[i][j]), end='\t')
            else:
                print('INF', end='\t')
        print()


def floyd_warshall(graph, v):
    dist = [[float('inf') for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('inf')
                    and dist[k][j] != float('inf')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
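# Floyd-Warshall relaxes every pair (i, j) through each intermediate vertex k,
# giving O(v**3) time and O(v**2) space; it tolerates negative edge weights,
# but not negative cycles.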
if __name__ == "__main__":
    v = int(input("""Enter number of vertices: """))
    e = int(input("""Enter number of edges: """))
    graph = [[float("""inf""") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("""\nEdge """, i + 1)
        src = int(input("""Enter source:"""))
        dst = int(input("""Enter destination:"""))
        weight = float(input("""Enter weight:"""))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 325 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
    MAX_NEW_TOKENS = 10
    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer (attribute names kept consistent with the rest of this file)
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map='auto'
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map='auto')
    def tearDown(self):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
__lowercase = config.to_dict()
__lowercase = config.to_diff_dict()
__lowercase = config.to_json_string()
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__lowercase = self.model_fpaa.get_memory_footprint()
__lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
__lowercase = True
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_fpaa.to(torch.floataa )
__lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowercase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase = self.model_fpaa.half()
# Check this does not throw an error
__lowercase = self.model_fpaa.float()
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
@classmethod
def a__ ( cls : int ) -> Tuple:
"""simple docstring"""
__lowercase = 't5-small'
__lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase = AutoTokenizer.from_pretrained(cls.model_name )
__lowercase = 'Translate in German: Hello, my dog is cute'
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int ) -> int:
"""simple docstring"""
from transformers import TaForConditionalGeneration
__lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase = None
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
__lowercase = modules
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
class Classes4BitModelTest(Base4bitTest):
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().setUp()
# model_name
__lowercase = 'bigscience/bloom-560m'
__lowercase = 't5-small'
# Different types of model
__lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# CausalLM model
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
def a__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class Bnb4BitTestTraining(Base4bitTest):
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'facebook/opt-350m'
super().setUp()
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
__lowercase = LoRALayer(module.q_proj , rank=16 )
__lowercase = LoRALayer(module.k_proj , rank=16 )
__lowercase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 325 | 1 |
'''simple docstring'''
def circle_sort(collection):
    """Sort a list in place using the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection, low, high) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
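# Example: circle_sort([0, 5, 3, 2, 2]) returns [0, 2, 2, 3, 5].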
if __name__ == "__main__":
_lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
_lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
| 184 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 184 | 1 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'''
_KWARGS_DESCRIPTION = '''\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n'''
_CITATION = '''\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'''
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: bool,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
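# Per class: IoU = intersect / union and accuracy = intersect / label area.
# mean_iou and mean_accuracy are nan-means over classes, while
# overall_accuracy pools all pixels before dividing.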
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) ,reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] ,)
    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
return iou_result | 74 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCAmelCase ( self : Dict ):
return BeitConfig(
vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=snake_case ,initializer_range=self.initializer_range ,out_indices=self.out_indices ,)
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Tuple ,snake_case : Optional[Any] ,snake_case : Union[str, Any] ,snake_case : Optional[int] ):
SCREAMING_SNAKE_CASE =BeitModel(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int] ,snake_case : Dict ,snake_case : Any ,snake_case : List[str] ):
SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling(config=snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : Any ,snake_case : str ,snake_case : Any ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.type_sequence_label_size
SCREAMING_SNAKE_CASE =BeitForImageClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE =1
SCREAMING_SNAKE_CASE =BeitForImageClassification(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self : List[str] ,snake_case : Tuple ,snake_case : str ,snake_case : Optional[int] ,snake_case : int ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation(snake_case )
model.to(snake_case )
model.eval()
SCREAMING_SNAKE_CASE =model(snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =config_and_inputs
SCREAMING_SNAKE_CASE ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _lowerCAmelCase ( self : Union[str, Any] ):
pass
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
SCREAMING_SNAKE_CASE =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case ,nn.Linear ) )
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE =['pixel_values']
self.assertListEqual(arg_names[:1] ,snake_case )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case )
def _lowerCAmelCase ( self : Any ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE =False
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE =model_class(snake_case )
model.gradient_checkpointing_enable()
model.to(snake_case )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
SCREAMING_SNAKE_CASE =model(**snake_case ).loss
loss.backward()
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =_config_zero_init(snake_case )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(config=snake_case )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@slow
def _lowerCAmelCase ( self : List[str] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =BeitModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : Tuple ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).pixel_values.to(snake_case )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE =torch.ones((1, 196) ,dtype=torch.bool ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(pixel_values=snake_case ,bool_masked_pos=snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(snake_case )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,snake_case ,atol=1e-2 ) )
@slow
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =281
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
snake_case )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 21841) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(snake_case )
self.assertTrue(torch.allclose(logits[0, :3] ,snake_case ,atol=1e-4 ) )
SCREAMING_SNAKE_CASE =2396
self.assertEqual(logits.argmax(-1 ).item() ,snake_case )
@slow
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] ,device=snake_case ,)
else:
SCREAMING_SNAKE_CASE =torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] ,device=snake_case ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,snake_case ,atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : int ):
SCREAMING_SNAKE_CASE =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE =model.to(snake_case )
SCREAMING_SNAKE_CASE =BeitImageProcessor(do_resize=snake_case ,size=640 ,do_center_crop=snake_case )
SCREAMING_SNAKE_CASE =load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
SCREAMING_SNAKE_CASE =Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**snake_case )
SCREAMING_SNAKE_CASE =outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case ,target_sizes=[(500, 300)] )
SCREAMING_SNAKE_CASE =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,snake_case )
SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=snake_case )
SCREAMING_SNAKE_CASE =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape ,snake_case )
| 334 | 0 |
"""simple docstring"""
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Brute force: for each element, scan the rest of the array."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but using enumerate and slicing instead of indices."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Stack-based single pass from the right."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
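# The stack-based version is O(n) overall (each element is pushed and popped
# at most once), versus O(n**2) worst case for the two scanning versions above.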
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        '''from __main__ import arr, next_greatest_element_slow, '''
        '''next_greatest_element_fast, next_greatest_element'''
    )
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 359 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
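# With the default "cosine" transform this produces the squaredcos_cap_v2
# ("Glide cosine") schedule used below: beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i),
# capped at max_beta.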
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.00_01,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get('''set_alpha_to_one''', None) is not None:
            deprecation_message = (
                '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
            )
            deprecate('''set_alpha_to_one''', '''1.0.0''', deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs['''set_alpha_to_one''']
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
def __snake_case ( self : List[Any] , snake_case__ : torch.FloatTensor , snake_case__ : Optional[int] = None ):
'''simple docstring'''
return sample
    def set_timesteps( self, num_inference_steps: int, device: Union[str, torch.device] = None ):
        '''simple docstring'''
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.num_train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps." )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round().copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        self.timesteps += self.config.steps_offset
    def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = False, variance_noise: Optional[torch.FloatTensor] = None, return_dict: bool = True, ):
        '''simple docstring'''
        # 1. get the previous step value (= t + 1 in the inverted process)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`" )
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def __len__( self : Tuple ):
'''simple docstring'''
return self.config.num_train_timesteps
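# Usage sketch (assumes a trained UNet; names like `unet` and `latents` are
# placeholders, not part of this file): DDIM inversion walks the inverse
# scheduler's timesteps, feeding each predicted noise back through `step`.
#
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample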
| 172 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine" )
    parser.add_argument("--num_epochs", type=int, default=5 )
    parser.add_argument("--batch_size", type=int, default=6 )
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1 )
    parser.add_argument("--freeze", type=bool, default=True )
    parser.add_argument("--learning_rate", type=float, default=5e-4 )
    parser.add_argument("--seed", type=int, default=0 )
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine" )
    parser.add_argument("--num_warmup_steps", type=int, default=10 )
    parser.add_argument("--weight_decay", type=float, default=0.01 )
    parser.add_argument("--output_dir", type=str, default="./results" )
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1 )
    return metric.compute(predictions=predictions, references=labels )
class CustomCallback(TrainerCallback ):
    def __init__( self , trainer ) -> None:
        super().__init__()
        self._trainer = trainer
    def on_epoch_end( self , args , state , control , **kwargs ):
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset("codeparrot/codecomplex", split="train" )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test["test"].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        } )
    print("Loading tokenizer and model" )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"] ) ) )
    def tokenize(example ):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024 )
        label = labels.str2int(example["complexity"] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize, batched=True, remove_columns=train_test_validation["train"].column_names, )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    print("Training..." )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
if __name__ == "__main__":
main()
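# Example invocation (hypothetical script name; the argparse defaults above
# apply when a flag is omitted):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5 --batch_size 6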
| 26 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float] ) -> list[float]:
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast(arr: list[float] ) -> list[float]:
    result = []
    for i, outer in enumerate(arr ):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item )
    return result
def next_greatest_element(arr: list[float] ) -> list[float]:
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
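# Quick sanity check (a sketch): for [2, 7, 3, 5, 4, 6, 8] every implementation
# should return [7, 8, 5, 6, 6, 8, -1] -- the next strictly greater element to
# the right of each position, or -1 when none exists.
def _sanity_check() -> None:
    small = [2, 7, 3, 5, 4, 6, 8]
    expected = [7, 8, 5, 6, 6, 8, -1]
    assert (
        next_greatest_element_slow(small)
        == next_greatest_element_fast(small)
        == next_greatest_element(small)
        == expected
    )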
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
) | 225 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    assert base_extractor.is_extractable(input_path )
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path , output_path )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8" )
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8" )
    expected_file_content = text_file.read_text(encoding="utf-8" )
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    extractor_format = Extractor.infer_extractor_format(input_path )
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path , output_path , extractor_format )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8" )
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8" )
    expected_file_content = text_file.read_text(encoding="utf-8" )
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file ):
    import tarfile
    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path , "w" ) as f:
        f.add(text_file , arcname=os.path.join("..", text_file.name ) )
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path ):
    import tarfile
    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True )
    with tarfile.TarFile(path , "w" ) as f:
        f.add(str(directory / "subdir" ) , arcname="subdir" )  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog ):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path , output_path )
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir ):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb" ) as f:
        f.write(data )
    assert zipfile.is_zipfile(str(not_a_zip_file ) )  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file )  # but we're right
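# For reference, a minimal magic-number probe of the kind these tests exercise
# (a sketch; the real logic lives in datasets.utils.extract). The three byte
# signatures are the standard ZIP local-file, end-of-central-directory, and
# spanned-archive markers.
def _starts_with_zip_magic(path) -> bool:
    with open(path, "rb") as f:
        return f.read(4) in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")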
| 258 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data ):
    return (data["data"], data["target"])
def xgboost(features , target , test_features ):
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
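# A small companion sketch (assumes the imports above): RMSE is often easier to
# read than MSE because it is expressed in the target's own units.
def rmse(y_true, y_pred) -> float:
    return float(np.sqrt(mean_squared_error(y_true, y_pred)))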
def main():
    california = fetch_california_housing()
    data, target = data_handling(california )
    x_train, x_test, y_train, y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test , predictions )}" )
    print(f"Mean Square Error : {mean_squared_error(y_test , predictions )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 258 | 1 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int ) -> int:
    """simple docstring"""
    return (position - 1) // 2
def get_child_left_position(position: int ) -> int:
    """simple docstring"""
    return (2 * position) + 1
def get_child_right_position(position: int ) -> int:
    """simple docstring"""
    return (2 * position) + 2
class MinPriorityQueue( Generic[T] ):
def __init__( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : list[tuple[T, int]] = []
SCREAMING_SNAKE_CASE_ : dict[T, int] = {}
SCREAMING_SNAKE_CASE_ : int = 0
def __len__( self : List[str] ):
"""simple docstring"""
return self.elements
def __repr__( self : Dict ):
"""simple docstring"""
return str(self.heap )
    def is_empty( self ) -> bool:
        """simple docstring"""
        return self.elements == 0
    def push( self, elem: T, weight: int ) -> None:
        """simple docstring"""
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )
    def extract_min( self ) -> T:
        """simple docstring"""
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1 )
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem
    def update_key( self, elem: T, weight: int ) -> None:
        """simple docstring"""
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )
    def _bubble_up( self, elem: T ) -> None:
        """simple docstring"""
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos )
            return self._bubble_up(elem )
        return None
    def _bubble_down( self, elem: T ) -> None:
        """simple docstring"""
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos )
                return self._bubble_down(elem )
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos )
                return self._bubble_down(elem )
        return None
    def _swap_nodes( self, node1_pos: int, node2_pos: int ) -> None:
        """simple docstring"""
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted( Generic[T] ):
def __init__( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : dict[T, dict[T, int]] = {}
SCREAMING_SNAKE_CASE_ : int = 0
def __repr__( self : Optional[Any] ):
"""simple docstring"""
return str(self.connections )
def __len__( self : str ):
"""simple docstring"""
return self.nodes
    def add_node( self, node: T ) -> None:
        """simple docstring"""
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge( self, node1: T, node2: T, weight: int ) -> None:
        """simple docstring"""
        self.add_node(node1 )
        self.add_node(node2 )
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
    """simple docstring"""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node , weight )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
                parent[neighbour] = node
    return dist, parent
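# Usage sketch (assumes the classes above; node labels are arbitrary): build a
# small undirected weighted graph and run Prim's algorithm over it.
def _demo() -> None:
    graph = GraphUndirectedWeighted[str]()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("c", "a", 5)
    dist, parent = prims_algo(graph)
    # `parent` encodes the spanning-tree edges; `dist` holds the edge weight
    # used to attach each node.
    print(parent)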
| 18 |
import socket
def main():
    """simple docstring"""
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12_312
    sock.connect((host, port) )
    sock.send(b"Hello server!" )
    with open("Received_file" , "wb" ) as out_file:
        print("File opened" )
        print("Receiving data..." )
        while True:
            data = sock.recv(10_24 )
            if not data:
                break
            out_file.write(data )
    print("Successfully received the file" )
    sock.close()
    print("Connection closed" )
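def _send_file_sketch(filename: str = "mytext.txt") -> None:
    # A hedged sketch of the matching sender side (not part of the original
    # script; the filename is a placeholder): serve one connection and stream
    # the file back in 1024-byte chunks.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12_312))
    server.listen(5)
    conn, _addr = server.accept()
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    server.close()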
if __name__ == "__main__":
main()
| 110 | 0 |
def catalan_numbers(upper_limit: int ) -> "list[int]":
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
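# Quick check (a sketch): the first Catalan numbers are 1, 1, 2, 5, 14, 42,
# so catalan_numbers(5) should return exactly that prefix.
def _check_catalan() -> None:
    assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]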
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(f"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 245 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class __A ( unittest.TestCase ):
"""simple docstring"""
    def get_file_format( self , seed , shape ):
        """simple docstring"""
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
    def get_latents( self , seed=0 , shape=(4, 4, 64, 64) , fp16=False ):
        """simple docstring"""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return image
    def get_unet_model( self , fp16=False , model_id="CompVis/stable-diffusion-v1-4" ):
        """simple docstring"""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = 'bf16' if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id , subfolder='unet' , dtype=dtype , revision=revision )
        return model, params
    def get_encoder_hidden_states( self , seed=0 , shape=(4, 77, 768) , fp16=False ):
        """simple docstring"""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed , shape ) ) , dtype=dtype )
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16( self , seed , timestep , expected_slice ):
        """simple docstring"""
        model, params = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fp16=True )
        latents = self.get_latents(seed , fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , fp16=True )
        sample = model.apply(
            {'params': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16( self , seed , timestep , expected_slice ):
        """simple docstring"""
        model, params = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fp16=True )
        latents = self.get_latents(seed , shape=(4, 4, 96, 96) , fp16=True )
        encoder_hidden_states = self.get_encoder_hidden_states(seed , shape=(4, 77, 1024) , fp16=True )
        sample = model.apply(
            {'params': params} , latents , jnp.array(timestep , dtype=jnp.int32 ) , encoder_hidden_states=encoder_hidden_states , ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.float32 )
        expected_output_slice = jnp.array(expected_slice , dtype=jnp.float32 )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice , expected_output_slice , atol=1e-2 )
| 245 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        """simple docstring"""
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
    def get_tokenizer( self, **kwargs ):
        """simple docstring"""
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_input_output_texts( self, tokenizer ):
        """simple docstring"""
        return ("This is a test", "This is a test")
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token = '''</s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ), token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ), token )
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0], '''<pad>''' )
        self.assertEqual(vocab_keys[1], '''</s>''' )
        self.assertEqual(vocab_keys[-1], '''v''' )
        self.assertEqual(len(vocab_keys ), 1_103 )
    def test_vocab_size( self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 1_103 )
    def test_mask_tokens_rust_pegasus( self ):
        """simple docstring"""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False ).input_ids[0]
        self.assertListEqual(rust_ids, py_ids )
    def test_large_mask_tokens( self ):
        """simple docstring"""
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        desired_result = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result, ids )
    def test_large_tokenizer_settings( self ):
        """simple docstring"""
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96_103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1_024
        raw_input_str = '''To ensure a smooth flow of bank resolutions.'''
        desired_result = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result, ids )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
    def test_large_seq2seq_truncation( self ):
        """simple docstring"""
        src_texts = ['''This is going to be way too long.''' * 150, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 1_024)
        assert batch.attention_mask.shape == (2, 1_024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={'''input_ids''': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase, model_name='''google/bigbird-pegasus-large-arxiv''', revision='''ba85d0851d708441f91440d509690f1ab6353415''', )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        """simple docstring"""
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
    def get_tokenizer( self, **kwargs ):
        """simple docstring"""
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_input_output_texts( self, tokenizer ):
        """simple docstring"""
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus( self ):
        """simple docstring"""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False ).input_ids[0]
        self.assertListEqual(rust_ids, py_ids )
@require_torch
    def test_large_seq2seq_truncation( self ):
        """simple docstring"""
        src_texts = ['''This is going to be way too long.''' * 1_000, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 4_096)
        assert batch.attention_mask.shape == (2, 4_096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer( self ):
        """simple docstring"""
        test_string = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        token_ids = self._large_tokenizer(test_string ).input_ids
        self.assertListEqual(
            token_ids, [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1], )
| 75 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path ):
    """simple docstring"""
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    flax_params = flatten_dict(flax_params )
    return flax_params
def rename_and_convert_flax_params(flax_dict ):
    """simple docstring"""
    converted_dict = {}
    CONVERSION_MAPPING = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
    DECODER_CONVERSION_MAPPING = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '''.'''.join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , new_key )
                new_key = new_key.replace('''encoder''' , '''encoder.encoder''' )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    """simple docstring"""
    flax_params = get_flax_param(t5x_checkpoint_path )
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config = Pix2StructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = Pix2StructForConditionalGeneration(config )
    torch_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )
    tokenizer = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print('''Model saved in {}'''.format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
    parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
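# Example invocation (hypothetical paths, shown for illustration):
#   python convert_pix2struct_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-converted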
| 75 | 1 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__( self, cache_dir: Optional[str] = None ):
        """simple docstring"""
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path( self, path: str ) -> str:
        """simple docstring"""
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path )
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path ) )
    def _do_extract( self, output_path: str, force_extract: bool ) -> bool:
        """simple docstring"""
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )
    def extract( self, input_path: str, force_extract: bool = False ) -> str:
        """simple docstring"""
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path, force_extract ):
            self.extractor.extract(input_path, output_path, extractor_format )
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable( cls, path: Union[Path, str], **kwargs ) -> bool:
        """simple docstring"""
        ...
    @staticmethod
    @abstractmethod
    def extract( input_path: Union[Path, str], output_path: Union[Path, str] ) -> None:
        """simple docstring"""
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []
    @staticmethod
    def read_magic_number( path: Union[Path, str], magic_number_length: int ):
        """simple docstring"""
        with open(path, "rb" ) as f:
            return f.read(magic_number_length )
    @classmethod
    def is_extractable( cls, path: Union[Path, str], magic_number: bytes = b"" ) -> bool:
        """simple docstring"""
        if not magic_number:
            magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path, magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
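# Minimal illustration (a sketch, not part of the library): a new format only
# needs its magic numbers plus an `extract` implementation; `is_extractable`
# is inherited from MagicNumberBaseExtractor.
class _DemoExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"DEMO"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        # A plain copy stands in for real decompression in this sketch.
        shutil.copyfile(input_path, output_path)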
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable( cls, path: Union[Path, str], **kwargs ) -> bool:
        """simple docstring"""
        return tarfile.is_tarfile(path )
    @staticmethod
    def safemembers( members, output_path ):
        """simple docstring"""
        def resolved(path: str ) -> str:
            return os.path.realpath(os.path.abspath(path ) )
        def badpath(path: str, base: str ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path ) ).startswith(base )
        def badlink(info, base: str ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name ) ) )
            return badpath(info.linkname, base=tip )
        base = resolved(output_path )
        for finfo in members:
            if badpath(finfo.name, base ):
                logger.error(f'Extraction of {finfo.name} is blocked (illegal path)' )
            elif finfo.issym() and badlink(finfo, base ):
                logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
            elif finfo.islnk() and badlink(finfo, base ):
                logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
            else:
                yield finfo
    @staticmethod
    def extract( input_path: Union[Path, str], output_path: Union[Path, str] ) -> None:
        """simple docstring"""
        os.makedirs(output_path, exist_ok=True )
        tar_file = tarfile.open(input_path )
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path ) )
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [B'\x1F\x8B']
    @staticmethod
    def extract( input_path: Union[Path, str], output_path: Union[Path, str] ) -> None:
        """simple docstring"""
        with gzip.open(input_path, "rb" ) as gzip_file:
            with open(output_path, "wb" ) as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file )
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        B'PK\x03\x04',
        B'PK\x05\x06',  # empty archive
        B'PK\x07\x08',  # spanned archive
    ]
    @classmethod
    def is_extractable( cls, path: Union[Path, str], magic_number: bytes = b"" ) -> bool:
        """simple docstring"""
        if super().is_extractable(path, magic_number=magic_number ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(path, "rb" ) as fp:
                endrec = _EndRecData(fp )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] )  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir )  # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data )  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract( input_path: Union[Path, str], output_path: Union[Path, str] ) -> None:
        """simple docstring"""
        os.makedirs(output_path, exist_ok=True )
        with zipfile.ZipFile(input_path, "r" ) as zip_file:
            zip_file.extractall(output_path )
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [B'\xFD\x37\x7A\x58\x5A\x00']
    @staticmethod
    def extract( input_path: Union[Path, str], output_path: Union[Path, str] ) -> None:
        """simple docstring"""
        with lzma.open(input_path ) as compressed_file:
            with open(output_path, "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file )
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00']  # RAR_ID # RAR5_ID
    @staticmethod
    def extract( input_path: Union[Path, str], output_path: Union[Path, str] ) -> None:
        """simple docstring"""
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile" )
        import rarfile
        os.makedirs(output_path, exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [B'\x28\xb5\x2F\xFD']
    @staticmethod
    def extract( input_path: Union[Path, str], output_path: Union[Path, str] ) -> None:
        """simple docstring"""
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard" )
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb" ) as ifh, open(output_path, "wb" ) as ofh:
            dctx.copy_stream(ifh, ofh )
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [B'\x42\x5A\x68']
    @staticmethod
    def extract( input_path: Union[Path, str], output_path: Union[Path, str] ) -> None:
        """simple docstring"""
        with bz2.open(input_path, "rb" ) as compressed_file:
            with open(output_path, "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file )
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [B'\x37\x7A\xBC\xAF\x27\x1C']
    @staticmethod
    def extract( input_path: Union[Path, str], output_path: Union[Path, str] ) -> None:
        """simple docstring"""
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr" )
        import py7zr
        os.makedirs(output_path, exist_ok=True )
        with py7zr.SevenZipFile(input_path, "r" ) as archive:
            archive.extractall(output_path )
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [B'\x04\x22\x4D\x18']
    @staticmethod
    def extract( input_path: Union[Path, str], output_path: Union[Path, str] ) -> None:
        """simple docstring"""
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4" )
        import lz4.frame
        with lz4.frame.open(input_path, "rb" ) as compressed_file:
            with open(output_path, "wb" ) as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file )
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length( cls ):
        """simple docstring"""
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )
    @staticmethod
    def _read_magic_number( path: Union[Path, str], magic_number_length: int ):
        """simple docstring"""
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length )
        except OSError:
            return b""
    @classmethod
    def is_extractable( cls, path: Union[Path, str], return_extractor: bool = False ) -> bool:
        """simple docstring"""
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.", category=FutureWarning, )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format( cls, path: Union[Path, str] ) -> Optional[str]:  # <Added version="2.4.0"/>
        """simple docstring"""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number ):
                return extractor_format
    @classmethod
    def extract( cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, extractor: Optional[BaseExtractor] = "deprecated", ) -> None:
        """simple docstring"""
        os.makedirs(os.path.dirname(output_path ), exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix(".lock" ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path, ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str ):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.", category=FutureWarning, )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path )
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.", category=FutureWarning, )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path, output_path ) | 359 |
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str ) -> str:
    '''simple docstring'''
    result = ""
    try:
        with open(file_path , "rb" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print("File not accessible" )
        sys.exit()
def decompress_data(data_bits: str ) -> str:
    '''simple docstring'''
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex["0" + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str , to_write: str ) -> None:
    '''simple docstring'''
    byte_length = 8
    try:
        with open(file_path , "wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("10000000" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
    except OSError:
        print("File not accessible" )
        sys.exit()
def remove_prefix(data_bits: str ) -> str:
    '''simple docstring'''
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str , destination_path: str ) -> None:
    '''simple docstring'''
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
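# Usage sketch (hypothetical file names): this module reverses the matching
# LZW-style compressor, so a typical invocation decompresses one file into
# another:
#   python lempel_ziv_decompress.py compressed.lzw restored.bin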
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2]) | 31 | 0 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
A =True
except ImportError:
A =False
A =logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case_ (_a : Namespace ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _a ( __a ):
@staticmethod
def A ( lowercase : ArgumentParser ):
'''simple docstring'''
UpperCAmelCase = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=lowercase , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=lowercase , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=lowercase )
def __init__( self : List[Any] , lowercase : bool , lowercase : str , lowercase : Dict=None , *lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = testing
UpperCAmelCase = testing_file
UpperCAmelCase = path
def A ( self : Optional[Any] ):
'''simple docstring'''
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
UpperCAmelCase = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(lowercase ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
UpperCAmelCase = (
Path(lowercase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
UpperCAmelCase = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowercase ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
UpperCAmelCase = json.load(lowercase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowercase , extra_context=lowercase , )
UpperCAmelCase = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
UpperCAmelCase = json.load(lowercase )
UpperCAmelCase = configuration['''lowercase_modelname''']
UpperCAmelCase = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f"{directory}/configuration.json" )
UpperCAmelCase = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
UpperCAmelCase = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
UpperCAmelCase = '''Flax''' in generate_tensorflow_pytorch_and_flax
UpperCAmelCase = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
os.makedirs(lowercase , exist_ok=lowercase )
os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=lowercase )
# Tests require submodules as they have parent imports
with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , '''w''' ):
pass
shutil.move(
f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , )
shutil.move(
f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , )
def remove_copy_lines(lowercase : Union[str, Any] ):
with open(lowercase , '''r''' ) as f:
UpperCAmelCase = f.readlines()
with open(lowercase , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowercase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowercase : str , lowercase : str , lowercase : List[str] ):
# Create temp file
UpperCAmelCase , UpperCAmelCase = mkstemp()
UpperCAmelCase = False
with fdopen(lowercase , '''w''' ) as new_file:
with open(lowercase ) as old_file:
for line in old_file:
new_file.write(lowercase )
if line_to_copy_below in line:
UpperCAmelCase = True
for line_to_copy in lines_to_copy:
new_file.write(lowercase )
if not line_found:
raise ValueError(f"Line {line_to_copy_below} was not found in file." )
# Copy the file permissions from the old file to the new file
copymode(lowercase , lowercase )
# Remove original file
remove(lowercase )
# Move new file
move(lowercase , lowercase )
def skip_units(lowercase : List[Any] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowercase : Tuple ):
with open(lowercase ) as datafile:
UpperCAmelCase = []
UpperCAmelCase = False
UpperCAmelCase = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
UpperCAmelCase = line.split('''"''' )[1]
UpperCAmelCase = skip_units(lowercase )
elif "# Below: " in line and "##" not in line:
UpperCAmelCase = line.split('''"''' )[1]
UpperCAmelCase = skip_units(lowercase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowercase , lowercase , lowercase )
UpperCAmelCase = []
elif "# Replace with" in line and "##" not in line:
UpperCAmelCase = []
elif "##" not in line:
lines_to_copy.append(lowercase )
remove(lowercase )
replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py" )
os.rmdir(lowercase )
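# Minimal sketch of the mkstemp-based in-place edit used by replace() above:
# stream a modified copy into a temp file, preserve permissions, then swap it
# over the original. Function and anchor names are illustrative.
import os
from shutil import copymode
from tempfile import mkstemp

def insert_below(path: str, anchor: str, new_lines: list) -> None:
    fd, tmp_path = mkstemp()
    found = False
    with os.fdopen(fd, "w") as tmp, open(path) as src:
        for line in src:
            tmp.write(line)
            if anchor in line:
                found = True
                tmp.writelines(new_lines)
    if not found:
        os.remove(tmp_path)
        raise ValueError(f"Anchor {anchor!r} was not found in {path}.")
    copymode(path, tmp_path)   # keep the original file's permissions
    os.replace(tmp_path, path)  # atomic swap on POSIX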
| 34 |
from __future__ import annotations
from math import ceil, floor, sqrt
def __UpperCamelCase ( _lowerCAmelCase = 200_0000 ) -> int:
"""simple docstring"""
A : list[int] = [0]
A : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
A : int = 0
# the area corresponding to the grid that gives the product closest to target
A : int = 0
# an estimate of b, using the quadratic formula
A : float
# the largest integer less than b_estimate
A : int
    # the smallest integer greater than b_estimate
A : int
# the triangle number corresponding to b_floor
A : int
# the triangle number corresponding to b_ceil
A : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
A : Union[str, Any] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
A : List[Any] = floor(_lowerCAmelCase )
A : Tuple = ceil(_lowerCAmelCase )
A : int = triangle_numbers[b_floor]
A : Dict = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
A : Optional[int] = triangle_b_first_guess * triangle_a
A : Optional[int] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
A : Tuple = triangle_b_second_guess * triangle_a
A : Tuple = idx_a * b_ceil
return area
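# Cross-check sketch: rectangles in an a x b grid number T(a) * T(b) with
# T(k) = k * (k + 1) / 2, so a brute-force scan over small grids should
# agree with solution() above for the default target.
def rectangle_count(a: int, b: int) -> int:
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)

def closest_area_brute_force(target: int = 2_000_000, limit: int = 100) -> int:
    best_a, best_b = min(
        ((a, b) for a in range(1, limit) for b in range(1, limit)),
        key=lambda ab: abs(target - rectangle_count(*ab)),
    )
    return best_a * best_b

print(closest_area_brute_force())  # expected to match solution()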
if __name__ == "__main__":
print(F"""{solution() = }""")
| 116 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def a_ ( __lowercase : str ) -> None:
_snake_case , _snake_case = analyze_text(__lowercase )
_snake_case = list(' ' + ascii_lowercase )
    # total count of single characters, used to normalize the probabilities
_snake_case = sum(single_char_strings.values() )
    # accumulator for the first-order (single-character) entropy
_snake_case = 0
    # for each symbol in the alphabet, accumulate its entropy contribution if it occurs
for ch in my_alphas:
if ch in single_char_strings:
_snake_case = single_char_strings[ch]
_snake_case = my_str / all_sum
my_fir_sum += prob * math.loga(__lowercase ) # entropy formula.
    # print first-order entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # second-order pass: total count of two-character sequences
    _snake_case = sum(two_char_strings.values() )
    _snake_case = 0
    # for each two-character sequence, accumulate its entropy contribution
    for cha in my_alphas:
        for chb in my_alphas:
            _snake_case = cha + chb
if sequence in two_char_strings:
_snake_case = two_char_strings[sequence]
_snake_case = int(__lowercase ) / all_sum
my_sec_sum += prob * math.loga(__lowercase )
    # print second-order entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def a_ ( __lowercase : str ) -> tuple[dict, dict]:
_snake_case = Counter() # type: ignore
_snake_case = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # count the implicit leading space paired with the first character
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(__lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def a_ ( ) -> str:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
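# Compact n-gram sketch equivalent in spirit to the two passes inside
# calculate_prob above: first- and second-order Shannon entropy.
def ngram_entropy(text: str, order: int = 1) -> float:
    counts = Counter(text[i : i + order] for i in range(len(text) - order + 1))
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

sample = "the quick brown fox jumps over the lazy dog"
print(f"H1 = {ngram_entropy(sample):.3f} bits, H2 = {ngram_entropy(sample, 2):.3f} bits")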
if __name__ == "__main__":
main() | 354 |
def a_ ( __lowercase : List[Any] ) -> Tuple:
_snake_case = len(__lowercase )
for i in range(length - 1 ):
_snake_case = i
for k in range(i + 1 , __lowercase ):
if collection[k] < collection[least]:
_snake_case = k
if least != i:
_snake_case , _snake_case = (collection[i], collection[least])
return collection
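# Quick property check against the built-in sort (a_ is the obfuscated name
# of the selection sort defined just above).
import random

_sample = random.sample(range(100), 20)
assert a_(_sample.copy()) == sorted(_sample)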
if __name__ == "__main__":
_lowerCamelCase : str = input('''Enter numbers separated by a comma:\n''').strip()
_lowerCamelCase : List[Any] = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted)) | 130 | 0 |
"""simple docstring"""
import random
from typing import Any
def a_ ( lowerCamelCase ):
for _ in range(len(lowerCamelCase ) ):
UpperCAmelCase__ = random.randint(0 , len(lowerCamelCase ) - 1 )
UpperCAmelCase__ = random.randint(0 , len(lowerCamelCase ) - 1 )
UpperCAmelCase__ , UpperCAmelCase__ = data[b], data[a]
return data
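# Textbook Fisher-Yates sketch for comparison: the function above swaps two
# random positions per step, whereas the classic unbiased algorithm walks the
# list once, swapping each position with a uniformly chosen earlier-or-equal one.
def fisher_yates(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data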
if __name__ == "__main__":
lowerCAmelCase__ : Dict = [0, 1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase__ : str = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 98 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
lowerCamelCase_ = ['''small''', '''medium''', '''large''']
lowerCamelCase_ = '''lm_head.decoder.weight'''
lowerCamelCase_ = '''lm_head.weight'''
def __magic_name__ ( __a : str , __a : str ):
'''simple docstring'''
UpperCamelCase__ = torch.load(__a )
UpperCamelCase__ = d.pop(__a )
os.makedirs(__a , exist_ok=__a )
torch.save(__a , os.path.join(__a , __a ) )
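# Minimal sketch of the key-rename step above on a toy state dict: pop the
# tensor out from under the old key and re-store it under the new one
# (shapes here are illustrative).
import torch

toy_state = {"lm_head.decoder.weight": torch.zeros(2, 2)}
toy_state["lm_head.weight"] = toy_state.pop("lm_head.decoder.weight")
print(sorted(toy_state))  # ['lm_head.weight']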
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
lowerCamelCase_ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
lowerCamelCase_ = os.path.join(args.dialogpt_path, f'{MODEL}_ft.pkl')
lowerCamelCase_ = f'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 244 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class lowercase ( snake_case__):
"""simple docstring"""
def __init__( self : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple ) -> str:
UpperCAmelCase_= params
UpperCAmelCase_= np.array(__UpperCAmelCase )
UpperCAmelCase_= np.array([len(__UpperCAmelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : List[Any] , __UpperCAmelCase : int ) -> Optional[int]:
return (self.token_ids[index], self.lengths[index])
def __len__( self : str ) -> Any:
return len(self.lengths )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
UpperCAmelCase_= self.params.max_model_input_size
UpperCAmelCase_= self.lengths > max_len
logger.info(F"""Splitting {sum(__UpperCAmelCase )} too long sequences.""" )
def divide_chunks(__UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] ):
return [l[i : i + n] for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )]
UpperCAmelCase_= []
UpperCAmelCase_= []
if self.params.mlm:
UpperCAmelCase_, UpperCAmelCase_= self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
UpperCAmelCase_, UpperCAmelCase_= self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
UpperCAmelCase_= []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
UpperCAmelCase_= np.insert(__UpperCAmelCase , 0 , __UpperCAmelCase )
if sub_s[-1] != sep_id:
UpperCAmelCase_= np.insert(__UpperCAmelCase , len(__UpperCAmelCase ) , __UpperCAmelCase )
assert len(__UpperCAmelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCAmelCase )
new_tok_ids.extend(__UpperCAmelCase )
new_lengths.extend([len(__UpperCAmelCase ) for l in sub_seqs] )
UpperCAmelCase_= np.array(__UpperCAmelCase )
UpperCAmelCase_= np.array(__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase_= len(self )
UpperCAmelCase_= self.lengths > 11
UpperCAmelCase_= self.token_ids[indices]
UpperCAmelCase_= self.lengths[indices]
UpperCAmelCase_= len(self )
logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
if "unk_token" not in self.params.special_tok_ids:
return
else:
UpperCAmelCase_= self.params.special_tok_ids["""unk_token"""]
UpperCAmelCase_= len(self )
UpperCAmelCase_= np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
UpperCAmelCase_= (unk_occs / self.lengths) < 0.5
UpperCAmelCase_= self.token_ids[indices]
UpperCAmelCase_= self.lengths[indices]
UpperCAmelCase_= len(self )
logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
if not self.params.is_master:
return
logger.info(F"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : Optional[int] ) -> List[Any]:
UpperCAmelCase_= [t[0] for t in batch]
UpperCAmelCase_= [t[1] for t in batch]
assert len(__UpperCAmelCase ) == len(__UpperCAmelCase )
# Max for paddings
UpperCAmelCase_= max(__UpperCAmelCase )
# Pad token ids
if self.params.mlm:
UpperCAmelCase_= self.params.special_tok_ids["""pad_token"""]
else:
UpperCAmelCase_= self.params.special_tok_ids["""unk_token"""]
UpperCAmelCase_= [list(t.astype(__UpperCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCAmelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCAmelCase )
assert all(len(__UpperCAmelCase ) == max_seq_len_ for t in tk_ )
UpperCAmelCase_= torch.tensor(tk_ ) # (bs, max_seq_len_)
UpperCAmelCase_= torch.tensor(__UpperCAmelCase ) # (bs)
return tk_t, lg_t
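# Standalone sketch of the pad-to-max collate implemented in batch_sequences
# above (pad index and data are illustrative).
import torch

def pad_collate(batch, pad_idx=0):
    lengths = torch.tensor([len(seq) for seq in batch])
    max_len = int(lengths.max())
    padded = torch.tensor([list(seq) + [pad_idx] * (max_len - len(seq)) for seq in batch])
    return padded, lengths

tokens, lengths = pad_collate([[5, 6, 7], [8, 9], [1]])
print(tokens.shape, lengths.tolist())  # torch.Size([3, 3]) [3, 2, 1]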
| 277 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
a__ : Tuple = KandinskyInpaintPipeline
a__ : Tuple = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
a__ : str = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
a__ : List[Any] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a__ : Union[str, Any] = False
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
return self.time_input_dim
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return 100
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
UpperCAmelCase_= XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase_= MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
UpperCAmelCase_= MultilingualCLIP(__UpperCAmelCase )
UpperCAmelCase_= text_encoder.eval()
return text_encoder
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
torch.manual_seed(0 )
UpperCAmelCase_= {
"""in_channels""": 9,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
UpperCAmelCase_= UNetaDConditionModel(**__UpperCAmelCase )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase_= VQModel(**self.dummy_movq_kwargs )
return model
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_= self.dummy_text_encoder
UpperCAmelCase_= self.dummy_tokenizer
UpperCAmelCase_= self.dummy_unet
UpperCAmelCase_= self.dummy_movq
UpperCAmelCase_= DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__UpperCAmelCase , )
UpperCAmelCase_= {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=0 ) -> Dict:
UpperCAmelCase_= floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
UpperCAmelCase_= floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__UpperCAmelCase )
# create init_image
UpperCAmelCase_= floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
UpperCAmelCase_= image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_= Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
UpperCAmelCase_= np.ones((64, 64) , dtype=np.floataa )
UpperCAmelCase_= 0
if str(__UpperCAmelCase ).startswith("""mps""" ):
UpperCAmelCase_= torch.manual_seed(__UpperCAmelCase )
else:
UpperCAmelCase_= torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
UpperCAmelCase_= {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
UpperCAmelCase_= """cpu"""
UpperCAmelCase_= self.get_dummy_components()
UpperCAmelCase_= self.pipeline_class(**__UpperCAmelCase )
UpperCAmelCase_= pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
UpperCAmelCase_= output.images
UpperCAmelCase_= pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
UpperCAmelCase_= image[0, -3:, -3:, -1]
UpperCAmelCase_= image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_= np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
UpperCAmelCase_= load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
UpperCAmelCase_= load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
UpperCAmelCase_= np.ones((768, 768) , dtype=np.floataa )
UpperCAmelCase_= 0
UpperCAmelCase_= """a hat"""
UpperCAmelCase_= KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCAmelCase )
UpperCAmelCase_= KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
UpperCAmelCase_= pipeline.to(__UpperCAmelCase )
pipeline.set_progress_bar_config(disable=__UpperCAmelCase )
UpperCAmelCase_= torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase_, UpperCAmelCase_= pipe_prior(
__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
UpperCAmelCase_= pipeline(
__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
UpperCAmelCase_= output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
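# Standalone sketch of the fixed-seed generator pattern get_dummy_inputs
# relies on for reproducible outputs (the device string is illustrative).
import torch

def make_generator(device: str, seed: int) -> torch.Generator:
    if device.startswith("mps"):  # MPS generators must be seeded via the CPU path
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen_a = make_generator("cpu", 0)
gen_b = make_generator("cpu", 0)
assert torch.equal(torch.rand(3, generator=gen_a), torch.rand(3, generator=gen_b))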
| 277 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class UpperCamelCase_ :
'''simple docstring'''
UpperCAmelCase__ = 42
UpperCAmelCase__ = None
UpperCAmelCase__ = None
def SCREAMING_SNAKE_CASE ( ) -> Node | None:
"""simple docstring"""
A__ = Node(1 )
A__ = Node(2 )
A__ = Node(3 )
A__ = Node(4 )
A__ = Node(5 )
return tree
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[int]:
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[int]:
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[int]:
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Sequence[Node | None]:
"""simple docstring"""
A__ = []
if root is None:
return output
A__ = deque([root] )
while process_queue:
A__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Sequence[Node | None]:
"""simple docstring"""
A__ = []
def populate_output(lowercase_ , lowercase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowercase_ , lowercase_ )
return output
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Sequence[Node | None]:
"""simple docstring"""
A__ = []
def populate_output(lowercase_ , lowercase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowercase_ , lowercase_ )
return output
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Sequence[Node | None] | list[Any]:
"""simple docstring"""
if root is None:
return []
A__ = []
A__ = 0
A__ = height(lowercase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowercase_ , lowercase_ ) )
A__ = 1
else:
output.append(get_nodes_from_right_to_left(lowercase_ , lowercase_ ) )
A__ = 0
return output
def SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing.
"""simple docstring"""
A__ = make_tree()
print(f"""In-order Traversal: {inorder(lowercase_ )}""" )
print(f"""Pre-order Traversal: {preorder(lowercase_ )}""" )
print(f"""Post-order Traversal: {postorder(lowercase_ )}""" , '''\n''' )
print(f"""Height of Tree: {height(lowercase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowercase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowercase_ ) + 1 ):
print(f"""Level {level}:""" , get_nodes_from_left_to_right(lowercase_ , level=lowercase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowercase_ ) )
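# Alternative sketch: an iterative, stack-based in-order traversal. On the
# tree main() builds (1 with children 2(4, 5) and 3, as its demo output
# implies) this yields [4, 2, 5, 1, 3].
def inorder_iterative(root: Node | None) -> list[int]:
    stack: list[Node] = []
    out: list[int] = []
    node = root
    while stack or node:
        while node:
            stack.append(node)
            node = node.left
        node = stack.pop()
        out.append(node.data)
        node = node.right
    return out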
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 14 |
from __future__ import annotations
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
if len(lowerCamelCase_ ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
_lowercase : Tuple = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
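# Standalone sketch of the same closure test with a hypothetical name: the
# longest side must be strictly shorter than the sum of the others.
def can_close(sides: list[float]) -> bool:
    ordered = sorted(sides)
    return ordered[-1] < sum(ordered[:-1])

assert can_close([3, 4, 5]) and not can_close([1, 1, 3])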
if __name__ == "__main__":
import doctest
doctest.testmod()
| 21 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : Union[str, Any] ):
UpperCamelCase :int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__lowerCamelCase )
UpperCamelCase :Any = -1
UpperCamelCase :List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase )
UpperCamelCase :Tuple = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCamelCase :Union[str, Any] = TextStreamer(__lowerCamelCase )
model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase :Any = cs.out[:-1]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Optional[Any] ):
UpperCamelCase :List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = -1
UpperCamelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
UpperCamelCase :Optional[Any] = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase )
UpperCamelCase :str = tokenizer.decode(greedy_ids[0] )
UpperCamelCase :List[str] = TextIteratorStreamer(__lowerCamelCase )
UpperCamelCase :List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCamelCase :int = Thread(target=model.generate , kwargs=__lowerCamelCase )
thread.start()
UpperCamelCase :Optional[Any] = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : List[str] ):
UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase :Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__lowerCamelCase )
UpperCamelCase :Any = -1
UpperCamelCase :str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase )
UpperCamelCase :List[str] = greedy_ids[:, input_ids.shape[1] :]
UpperCamelCase :List[str] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCamelCase :Optional[Any] = TextStreamer(__lowerCamelCase , skip_prompt=__lowerCamelCase )
model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase :str = cs.out[:-1]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Optional[Any] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCamelCase :Optional[int] = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCamelCase :Dict = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(__lowerCamelCase )
UpperCamelCase :str = -1
UpperCamelCase :Optional[Any] = torch.ones((1, 5) , device=__lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCamelCase :Any = TextStreamer(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
model.generate(__lowerCamelCase , max_new_tokens=1 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCamelCase :str = cs.out[:-1] # Remove the final "\n"
UpperCamelCase :List[str] = tokenizer(__lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _A ( self : str ):
UpperCamelCase :Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase :Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__lowerCamelCase )
UpperCamelCase :int = -1
UpperCamelCase :int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = TextIteratorStreamer(__lowerCamelCase , timeout=0.001 )
UpperCamelCase :Dict = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCamelCase :Dict = Thread(target=model.generate , kwargs=__lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCamelCase ):
UpperCamelCase :Optional[int] = """"""
for new_text in streamer:
streamer_text += new_text
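# Standalone sketch of the thread-plus-iterator pattern these tests exercise:
# generate() runs in a worker thread while the main thread consumes decoded
# text chunks as they arrive.
def stream_demo() -> str:
    tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    streamer = TextIteratorStreamer(tok)
    inputs = tok("Hello", return_tensors="pt")
    Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
    return "".join(streamer)  # blocks until generation finishes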
| 62 |
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[int] ) -> Any:
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : dict[int, list[int]] ) -> list[tuple[int, int]]:
"""simple docstring"""
UpperCamelCase :Any = 0
UpperCamelCase :int = len(__magic_name__ ) # No of vertices in graph
UpperCamelCase :int = [0] * n
UpperCamelCase :Union[str, Any] = [False] * n
def dfs(__magic_name__ : str , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] ):
UpperCamelCase :Any = True
UpperCamelCase :str = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__magic_name__ , __magic_name__ , __magic_name__ , id_ )
UpperCamelCase :Dict = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
UpperCamelCase :int = min(low[at] , low[to] )
UpperCamelCase :list[tuple[int, int]] = []
for i in range(__magic_name__ ):
if not visited[i]:
dfs(__magic_name__ , -1 , __magic_name__ , id_ )
return bridges
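# Brute-force cross-check sketch for connected graphs: an edge is a bridge
# iff removing it disconnects the graph (function names are hypothetical;
# the demo adjacency list below is sample graph 0 from above).
def is_connected(adj: dict, skip: tuple = ()) -> bool:
    start = next(iter(adj))
    seen, stack = {start}, [start]
    while stack:
        u = stack.pop()
        for v in adj[u]:
            if {u, v} != set(skip) and v not in seen:
                seen.add(v)
                stack.append(v)
    return len(seen) == len(adj)

def bridges_brute_force(adj: dict) -> set:
    edges = {tuple(sorted((u, v))) for u in adj for v in adj[u]}
    return {e for e in edges if not is_connected(adj, skip=e)}

demo = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
assert bridges_brute_force(demo) == {(2, 3), (2, 5), (3, 4)}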
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case_ (lowerCamelCase_ ):
@staticmethod
@abstractmethod
def lowerCamelCase__( __snake_case :ArgumentParser ) -> str:
raise NotImplementedError()
@abstractmethod
def lowerCamelCase__( self :Optional[Any] ) -> int:
raise NotImplementedError()
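# Hypothetical standalone sketch of the pattern this ABC encodes: in
# transformers proper the two abstract methods are register_subcommand
# (wires an argparse sub-parser) and run (executes the command).
from abc import ABC, abstractmethod
from argparse import ArgumentParser

class Command(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers): ...

    @abstractmethod
    def run(self): ...

class EchoCommand(Command):
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("echo")
        sub.add_argument("text")
        sub.set_defaults(func=lambda args: EchoCommand(args.text).run())

    def __init__(self, text: str):
        self.text = text

    def run(self):
        print(self.text)

root = ArgumentParser("demo")
EchoCommand.register_subcommand(root.add_subparsers())
args = root.parse_args(["echo", "hi"])
args.func(args)  # prints "hi"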
| 240 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Dict ) -> Union[str, Any]:
if self.framework == "pytorch":
subprocess.run(
F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() ,encoding='utf-8' ,check=__snake_case ,)
assert hasattr(self ,'env' )
def lowerCamelCase__( self :List[Any] ,__snake_case :Optional[Any]=1 ) -> int:
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F'{self.env.base_job_name}-single' ,instance_count=__snake_case ,instance_type=self.instance_type ,debugger_hook_config=__snake_case ,hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,py_version='py36' ,)
def lowerCamelCase__( self :Dict ,__snake_case :str ) -> Optional[int]:
TrainingJobAnalytics(__snake_case ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' )
def lowerCamelCase__( self :Tuple ) -> Union[str, Any]:
# create estimator
a__ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
a__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
a__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
a__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
a__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'{estimator.latest_training_job.name}.json' ,'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__snake_case )
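# Offline sketch of the metric extraction above on a stand-in DataFrame
# (the real one comes from TrainingJobAnalytics(...).dataframe()).
import pandas as pd

df = pd.DataFrame({"metric_name": ["eval_accuracy", "eval_loss"], "value": [0.71, 0.42]})
eval_accuracy = list(df[df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(df[df.metric_name == "eval_loss"]["value"])
assert all(a >= 0.6 for a in eval_accuracy) and all(l <= 0.9 for l in eval_loss)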
| 240 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case_ )
class _lowerCAmelCase ( snake_case_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__UpperCAmelCase : str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
__UpperCAmelCase : ClassVar[Features] = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
__UpperCAmelCase : ClassVar[Features] = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
__UpperCAmelCase : str = "question"
__UpperCAmelCase : str = "context"
__UpperCAmelCase : str = "answers"
@property
def lowerCamelCase ( self ) -> Dict[str, str]:
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
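# Standalone sketch of the frozen-dataclass template pattern above, with
# hypothetical field names mirroring datasets' QuestionAnsweringExtractive:
# column_mapping drives renaming dataset columns to the canonical names.
from dataclasses import dataclass

@dataclass(frozen=True)
class QATemplate:
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> dict:
        return {
            self.question_column: "question",
            self.context_column: "context",
            self.answers_column: "answers",
        }

print(QATemplate(question_column="q").column_mapping)
# {'q': 'question', 'context': 'context', 'answers': 'answers'}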
| 364 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
__UpperCAmelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : np.ndarray
__UpperCAmelCase : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
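# Standalone sketch of the guard pattern used throughout this module: probe
# for a feature, raise inside try, and bind either the real import or a
# dummy placeholder in except/else (all names here are hypothetical).
class MissingDependency(Exception):
    pass

def is_foo_available() -> bool:  # stand-in checker
    return False

try:
    if not is_foo_available():
        raise MissingDependency()
except MissingDependency:
    FooPipeline = None  # the real code imports dummy placeholder objects
else:
    from foo_pkg import FooPipeline  # only runs when the dependency exists

print(FooPipeline)  # None in this sketch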
| 112 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__UpperCamelCase : Dict = logging.get_logger(__name__)
class __magic_name__ ( __lowerCAmelCase):
def __init__( self : Dict , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Optional[Any] ) -> None:
'''simple docstring'''
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
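# Generic version of the deprecation-shim pattern above, with hypothetical
# class names: the old name stays importable but warns and delegates.
import warnings

class NewProcessor:
    def __init__(self, scale: float = 1.0):
        self.scale = scale

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor(scale=2.0)  # emits a FutureWarning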
| 146 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
UpperCamelCase__ : List[Any] = torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )
UpperCamelCase__ : str = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
UpperCamelCase__ : int = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCamelCase__ : Optional[int] = v
else:
UpperCamelCase__ : Tuple = v
UpperCamelCase__ : Union[str, Any] = chkpt['''params''']
UpperCamelCase__ : Optional[Any] = {n: v for n, v in config.items() if not isinstance(SCREAMING_SNAKE_CASE , (torch.FloatTensor, numpy.ndarray) )}
UpperCamelCase__ : Dict = chkpt['''dico_word2id''']
UpperCamelCase__ : Dict = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
UpperCamelCase__ : List[str] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCamelCase__ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCamelCase__ : Any = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE , indent=2 ) + '''\n''' )
print(F"Save vocab file to {pytorch_config_dump_path}" )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE , indent=2 ) + '''\n''' )
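# Toy illustration of the vocab rewrite above: fastBPE's '@@' continuation
# markers are stripped, and word-final pieces (ids above the 13 special
# tokens) gain a '</w>' end-of-word marker.
toy_vocab = {"<s>": 0, "hel@@": 14, "lo": 15}
converted = {
    (s + "</w>") if ("@@" not in s and i > 13) else s.replace("@@", ""): i
    for s, i in toy_vocab.items()
}
print(converted)  # {'<s>': 0, 'hel': 14, 'lo</w>': 15}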
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 146 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , ) -> Dict:
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size if size is not None else {'height': 18, 'width': 20}
UpperCamelCase = do_thumbnail
UpperCamelCase = do_align_axis
UpperCamelCase = do_pad
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = DonutImageProcessingTester(self )
@property
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_thumbnail' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_align_long_axis' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_pad' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'image_std' ) )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@is_flaky()
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
    def test_call_pytorch( self ) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
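# Illustrative only (not part of the test class above): the shape contract the
# three tests assert, expressed as a standalone helper; `num_images` of 1
# covers the unbatched call, otherwise the first dimension is the batch size.
def _expected_pixel_values_shape(num_images: int, num_channels: int, height: int, width: int) -> tuple:
    # image processors return (batch, channels, height, width) tensors
    return (num_images, num_channels, height, width)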
| 354 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowercase ( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , embedding_dim = 768 , ) -> None:
        """simple docstring"""
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to( self , torch_device = None , torch_dtype = None , ):
        """simple docstring"""
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self , embeds ) -> torch.Tensor:
        """simple docstring"""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds ) -> torch.Tensor:
        """simple docstring"""
        embeds = (embeds * self.std) + self.mean
        return embeds
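# A minimal usage sketch of the normalizer above (executed only when this
# module is run directly); `scale`/`unscale` are the method names restored in
# this file and are exact inverses, so the round trip should be lossless up to
# floating-point error.
if __name__ == "__main__":
    normalizer = lowercase(embedding_dim=768)
    sample = torch.randn(2, 768)
    restored = normalizer.unscale(normalizer.scale(sample))
    assert torch.allclose(sample, restored, atol=1e-6)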
| 110 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = '''▁'''
_UpperCamelCase = {'''vocab_file''': '''spiece.model'''}
_UpperCamelCase = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
_UpperCamelCase = {
'''google/reformer-crime-and-punishment''': 52_4288,
}
class _lowerCamelCase ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs = None , **kwargs , ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return self.sp_model.get_piece_size()
    def get_vocab( self ) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
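# A minimal, dependency-free sketch of the convert_tokens_to_string logic
# above: pieces are decoded in runs, flushing the buffer whenever a special
# token is hit so that special tokens are never fed to the sentencepiece
# model. `decode_pieces` is a stand-in assumption for `sp_model.decode`.
def _demo_convert_tokens_to_string(tokens, special_tokens, decode_pieces):
    out_string, current = "", []
    for token in tokens:
        if token in special_tokens:
            out_string += decode_pieces(current) + token
            current = []
        else:
            current.append(token)
    return (out_string + decode_pieces(current)).strip()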
| 326 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer( self ) -> None:
        '''simple docstring'''
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
        tokens = tokenizer(**self.metas )["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer( self ) -> None:
        '''simple docstring'''
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
        tokens = tokenizer(**self.metas )["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
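# Side note (sketch, not part of the original tests): the expected outputs
# above are integer token ids, so an exact comparison such as
# torch.equal(tokens[0], EXPECTED_OUTPUT[0]) would also hold; torch.allclose
# passes here because integer ids are represented exactly.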
| 326 | 1 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class UpperCAmelCase ( FlaxModelTesterMixin , unittest.TestCase):
    model_class = FlaxAutoencoderKL
    @property
    def dummy_input( self ):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self ):
        """simple docstring"""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 31 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample( wav : np.ndarray , max_length : float , sample_rate : int = 1_60_00 ) -> np.ndarray:
    '''simple docstring'''
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
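# Illustrative only: a tiny, self-contained check of random_subsample's
# contract (the helper name and defaults match the function above; the
# waveform here is synthetic and the function is never called at import time).
def _demo_random_subsample() -> None:
    wav = np.zeros(32_000, dtype=np.float32)  # 2 s of silence at 16 kHz
    clip = random_subsample(wav, max_length=1.0)  # crop to at most 1 s
    assert len(clip) == 16_000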
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
    def __post_init__( self ):
        """simple docstring"""
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.", FutureWarning, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def main() -> None:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
    main()
| 31 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig ( PretrainedConfig ):
    model_type = 'beit'
    def __init__( self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1E-4
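# A minimal usage sketch (guarded so it only runs when this module is executed
# directly): instantiating the config with defaults and overriding one field.
if __name__ == "__main__":
    config = BeitConfig(image_size=384)
    assert config.image_size == 384 and config.hidden_size == 768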
| 278 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector :
    def __init__( self , components : Collection[float] | None = None ):
        """simple docstring"""
        if components is None:
            components = []
        self.__components = list(components )
def __len__( self : Union[str, Any] ):
"""simple docstring"""
return len(self.__components )
def __str__( self : int ):
"""simple docstring"""
return "(" + ",".join(map(__lowercase , self.__components ) ) + ")"
    def __add__( self , other : Vector ):
        """simple docstring"""
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception('must have the same size' )
    def __sub__( self , other : Vector ):
        """simple docstring"""
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else:  # error case
            raise Exception('must have the same size' )
    @overload
    def __mul__( self , other : float ):
        """simple docstring"""
        ...
    @overload
    def __mul__( self , other : Vector ):
        """simple docstring"""
        ...
    def __mul__( self , other : float | Vector ):
        """simple docstring"""
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else:  # error case
            raise Exception('invalid operand!' )
    def copy( self ):
        """simple docstring"""
        return Vector(self.__components )
    def component( self , i : int ):
        """simple docstring"""
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception('index out of range' )
    def change_component( self , pos : int , value : float ):
        """simple docstring"""
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length( self ):
        """simple docstring"""
        if len(self.__components ) == 0:
            raise Exception('Vector is empty' )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self , other : Vector , deg : bool = False ):
        """simple docstring"""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
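# Illustrative only: a small worked example of the Vector API above.
# (1, 0) and (0, 1) are orthogonal, so their angle is 90 degrees.
def _demo_vector_usage() -> None:
    x = Vector([1, 0])
    y = Vector([0, 1])
    assert (x + y).component(0 ) == 1
    assert abs(x.angle(y , deg=True ) - 90.0 ) < 1e-9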
def zero_vector ( dimension : int ):
    '''simple docstring'''
    assert isinstance(dimension, int )
    return Vector([0] * dimension )
def unit_basis_vector ( dimension : int, pos : int ):
    '''simple docstring'''
    assert isinstance(dimension, int ) and (isinstance(pos, int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy ( scalar : float, x : Vector, y : Vector ):
    '''simple docstring'''
    assert (
        isinstance(x, Vector )
        and isinstance(y, Vector )
        and (isinstance(scalar, (int, float) ))
    )
    return x * scalar + y
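# Worked example: axpy(2, x=(1, 2), y=(3, 4)) computes 2*x + y,
# i.e. (2*1 + 3, 2*2 + 4) = (5, 8).
def _demo_axpy() -> None:
    result = axpy(2, Vector([1, 2] ), Vector([3, 4] ) )
    assert (result.component(0 ), result.component(1 )) == (5, 8)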
def random_vector ( n : int, a : int, b : int ):
    '''simple docstring'''
    random.seed(n )
    ans = [random.randint(a, b ) for _ in range(n )]
    return Vector(ans )
class Matrix :
    def __init__( self , matrix : list[list[float]] , w : int , h : int ):
        """simple docstring"""
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__( self ):
        """simple docstring"""
        ans = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__( self , other : Matrix ):
        """simple docstring"""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrix must have the same dimension!' )
    def __sub__( self , other : Matrix ):
        """simple docstring"""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrices must have the same dimension!' )
    @overload
    def __mul__( self , other : float ):
        """simple docstring"""
        ...
    @overload
    def __mul__( self , other : Vector ):
        """simple docstring"""
        ...
    def __mul__( self , other : float | Vector ):
        """simple docstring"""
        if isinstance(other , Vector ): # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    summands = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(summands ) )
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!' )
        elif isinstance(other , (int, float) ): # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None
    def height( self ):
        """simple docstring"""
        return self.__height
    def width( self ):
        """simple docstring"""
        return self.__width
    def component( self , x : int , y : int ):
        """simple docstring"""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds' )
    def change_component( self , x : int , y : int , value : float ):
        """simple docstring"""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds' )
    def minor( self , x : int , y : int ):
        """simple docstring"""
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor( self , x : int , y : int ):
        """simple docstring"""
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception('Indices out of bounds' )
    def determinant( self ):
        """simple docstring"""
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if self.__height < 1:
            raise Exception('Matrix has no element' )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
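# Worked example: for the 2x2 matrix [[1, 2], [3, 4]] the determinant is
# 1*4 - 2*3 = -2, computed by the base case of determinant() above.
def _demo_determinant() -> None:
    m = Matrix([[1, 2], [3, 4]] , 2 , 2 )
    assert m.determinant() == -2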
def square_zero_matrix ( n : int ):
    '''simple docstring'''
    ans = [[0] * n for _ in range(n )]
    return Matrix(ans, n, n )
def random_matrix ( w : int, h : int, a : int, b : int ):
    '''simple docstring'''
    random.seed(w )
    matrix = [
        [random.randint(a, b ) for _ in range(w )] for _ in range(h )
    ]
    return Matrix(matrix, w, h )
| 141 | 0 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """simple docstring"""
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input( self ) -> dict:
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
    @property
    def input_shape( self ) -> tuple:
        return (3, 32, 32)
    @property
    def output_shape( self ) -> tuple:
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ) -> tuple:
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature( self ) -> None:
        pass
    def test_training( self ) -> None:
        pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
    def test_gradient_checkpointing( self ) -> None:
        # enable deterministic behavior for gradient checkpointing
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(torch_device )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        out_a = model_a(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        named_params = dict(model.named_parameters() )
        named_params_a = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
    def test_from_pretrained_hub( self ) -> None:
        model , loading_info = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained( self ) -> None:
        model = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
        model = model.to(torch_device )
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )
        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078E-01,
                    -3.8323E-04,
                    -1.2681E-01,
                    -1.1462E-01,
                    2.0095E-01,
                    1.0893E-01,
                    -8.8247E-02,
                    -3.0361E-01,
                    -9.8644E-03,
                ] )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2 ) )
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def get_file_format( self , seed , shape ) -> str:
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'
    def tearDown( self ) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image( self , seed=0 , shape=(4, 3, 512, 512) , fp16=False ) -> torch.Tensor:
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
    def get_sd_vae_model( self , model_id="CompVis/stable-diffusion-v1-4" , fp16=False ) -> AutoencoderKL:
        revision = 'fp16' if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id , subfolder='vae' , torch_dtype=torch_dtype , revision=revision , )
        model.to(torch_device ).eval()
        return model
    def get_generator( self , seed=0 ) -> torch.Generator:
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion( self , seed , expected_slice , expected_slice_mps ) -> None:
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16( self , seed , expected_slice ) -> None:
        model = self.get_sd_vae_model(fp16=True )
        image = self.get_sd_image(seed , fp16=True )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode( self , seed , expected_slice , expected_slice_mps ) -> None:
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        with torch.no_grad():
            sample = model(image ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode( self , seed , expected_slice ) -> None:
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16( self , seed , expected_slice ) -> None:
        model = self.get_sd_vae_model(fp16=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fp16=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16( self , seed ) -> None:
        model = self.get_sd_vae_model(fp16=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fp16=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_a , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
    def test_stable_diffusion_decode_xformers_vs_2_0( self , seed ) -> None:
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample( self , seed , expected_slice ) -> None:
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            dist = model.encode(image ).latent_dist
            sample = dist.sample(generator=generator )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        tolerance = 3E-3 if torch_device != 'mps' else 1E-2
        assert torch_all_close(output_slice , expected_output_slice , atol=tolerance )
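# A minimal, self-contained sketch (not part of the test suite) showing the
# encode -> sample -> decode round trip these tests exercise, on a tiny
# randomly initialized AutoencoderKL so it runs quickly on CPU.
def _demo_vae_round_trip() -> None:
    vae = AutoencoderKL(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=4,
    )
    x = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        latents = vae.encode(x).latent_dist.sample(generator=torch.manual_seed(0))
        recon = vae.decode(latents).sample
    assert recon.shape == x.shape  # the decoder restores the spatial size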
| 351 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def get_deta_config ( model_name ):
    backbone_config = SwinConfig(
        embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 3_66
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
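# A sketch of the companion helper such conversion scripts typically define to
# apply the (old, new) pairs produced below; the name `rename_key` and the
# pop/re-insert behavior are assumptions, not shown in this excerpt.
def rename_key(state_dict: dict, old: str, new: str) -> None:
    val = state_dict.pop(old)  # remove the original checkpoint key
    state_dict[new] = val      # re-insert it under the HF model's name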
def create_rename_keys ( config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            # (target names follow the `attention.self.*` scheme used in the rename table above)
            state_dict[F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[F'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[F'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[F'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(F'Model name {model_name} not supported')

    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes (replacement targets reconstructed from the rename logic above)
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format='coco_detection')

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))

    # verify logits
    print('Logits:', outputs.logits[0, :3, :3])
    print('Boxes:', outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1E-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1E-4)
    print('Everything ok!')

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...')
        model.push_to_hub(F'jozhang97/{model_name}')
        processor.push_to_hub(F'jozhang97/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
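# --- Hedged usage sketch (not part of the original script) ---
# A hypothetical invocation, assuming the file is saved as
# convert_deta_swin_to_pytorch.py; the output folder name is illustrative:
#
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large
#
# or, from Python, calling the conversion function directly:
#
#   convert_deta_checkpoint("deta-swin-large", "./deta-swin-large", push_to_hub=False)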
| 117 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):

    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
def lowerCamelCase_ ( self: List[Any] ):
pass
def lowerCamelCase_ ( self: Tuple ):
# Initialize image_processing
lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase__ : Any = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase_ ( self: int ):
# Initialize image_processing
lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
lowerCamelCase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase__ : int = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase_ ( self: List[Any] ):
# Initialize image_processing
lowerCamelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
lowerCamelCase__ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCamelCase__ : str = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase_ ( self: List[Any] ):
# Initialize image_processing
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
lowerCamelCase__ : List[str] = []
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
lowerCamelCase__ : int = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
lowerCamelCase__ : List[str] = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
lowerCamelCase__ , lowerCamelCase__ : Dict = prepare_semantic_single_inputs()
lowerCamelCase__ : Optional[Any] = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = prepare_semantic_batch_inputs()
lowerCamelCase__ : int = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
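# A minimal sketch (not from the test file) of what `do_reduce_labels` does,
# assuming the documented BEiT semantics: the background class 0 is mapped to
# the ignore index 255 and all other labels are shifted down by one.
import numpy as np


def reduce_labels_sketch(segmentation_map: np.ndarray) -> np.ndarray:
    reduced = segmentation_map.astype(np.int64) - 1  # shift every class down by one
    reduced[segmentation_map == 0] = 255             # background becomes the ignore index
    return reduced


assert reduce_labels_sketch(np.array([0, 1, 150])).tolist() == [255, 0, 149]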
| 41 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    """simple docstring"""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
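# A brief usage sketch (illustrative; the values follow the defaults above):
#
#   config = NezhaConfig(hidden_dropout_prob=0.2)
#   assert config.vocab_size == 21128 and config.model_type == "nezha"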
| 314 | 0 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime)
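# Worked example (illustrative): for n = 13195 the trial-division loop strips
# the factors 5, 7 and 13, leaving n = 29 once i * i > n, so the answer is 29.
assert solution(13195) == 29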
if __name__ == "__main__":
print(f"""{solution() = }""") | 240 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
"""simple docstring"""
def __init__( self : Any , snake_case : Any , snake_case : Optional[int]=13 , snake_case : List[str]=7 , snake_case : List[str]=True , snake_case : List[Any]=True , snake_case : int=True , snake_case : Tuple=True , snake_case : int=99 , snake_case : Any=16 , snake_case : Dict=36 , snake_case : Any=6 , snake_case : Dict=6 , snake_case : Dict=6 , snake_case : int=37 , snake_case : int="gelu" , snake_case : str=0.1 , snake_case : Any=0.1 , snake_case : Dict=512 , snake_case : List[Any]=16 , snake_case : Any=2 , snake_case : Any=0.02 , snake_case : Optional[int]=3 , snake_case : List[Any]=4 , snake_case : List[str]=None , ) -> Union[str, Any]:
__UpperCAmelCase : str = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : int = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : List[str] = use_input_mask
__UpperCAmelCase : List[Any] = use_token_type_ids
__UpperCAmelCase : Dict = use_labels
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : Optional[int] = embedding_size
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : List[Any] = num_hidden_groups
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[int] = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : Dict = num_labels
__UpperCAmelCase : str = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : int = None
if self.use_input_mask:
__UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Dict = None
if self.use_token_type_ids:
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : int = None
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase__ ( self : Tuple , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : int ) -> Optional[int]:
__UpperCAmelCase : List[Any] = AlbertModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Tuple = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
__UpperCAmelCase : List[str] = model(snake_case , token_type_ids=snake_case )
__UpperCAmelCase : str = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self : List[str] , snake_case : str , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Dict , snake_case : Optional[int] ) -> Optional[int]:
__UpperCAmelCase : str = AlbertForPreTraining(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , sentence_order_label=snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase__ ( self : Dict , snake_case : Union[str, Any] , snake_case : Dict , snake_case : Any , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Any , snake_case : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Dict = AlbertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , snake_case : Tuple , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Tuple ) -> int:
__UpperCAmelCase : Optional[Any] = AlbertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Optional[Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Tuple , snake_case : List[str] , snake_case : Dict , snake_case : Optional[int] , snake_case : Dict , snake_case : int , snake_case : Optional[int] , snake_case : Optional[Any] ) -> Any:
__UpperCAmelCase : Optional[int] = self.num_labels
__UpperCAmelCase : Any = AlbertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : Tuple , snake_case : List[Any] , snake_case : Optional[int] , snake_case : str , snake_case : Dict , snake_case : Union[str, Any] , snake_case : List[str] ) -> int:
__UpperCAmelCase : Optional[int] = self.num_labels
__UpperCAmelCase : Optional[int] = AlbertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : Tuple , snake_case : List[Any] , snake_case : Dict , snake_case : int , snake_case : List[Any] , snake_case : List[Any] , snake_case : Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[int] = self.num_choices
__UpperCAmelCase : List[Any] = AlbertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[str] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Any = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Any = True
def lowerCamelCase__ ( self : Optional[int] , snake_case : Any , snake_case : Dict , snake_case : Tuple=False ) -> Optional[Any]:
__UpperCAmelCase : Any = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
__UpperCAmelCase : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case )
__UpperCAmelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def lowerCamelCase__ ( self : Dict ) -> int:
__UpperCAmelCase : List[Any] = AlbertModelTester(self )
__UpperCAmelCase : Any = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Any ) -> Any:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def lowerCamelCase__ ( self : Dict ) -> str:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def lowerCamelCase__ ( self : str ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Tuple = type
self.model_tester.create_and_check_model(*snake_case )
@slow
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[Any] = AlbertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained('albert-base-v2')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 240 | 1 |
import inspect
import unittest
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
import diffusers
from diffusers.dependency_versions_table import deps
        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = 'k-diffusion'
                    elif backend == "invisible_watermark":
                        backend = 'invisible-watermark'
                    assert backend in deps, F'''{backend} is not in the deps table!'''
| 222 |
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
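# A minimal sketch (not part of the conversion script) of why this works:
# pointing the Linear's .weight at the embedding matrix ties input and output
# embeddings, so the logits are dot products with each embedding row.
_emb = nn.Embedding(10, 4)            # toy vocabulary of 10, embedding dim 4
_lm_head = make_linear_from_emb(_emb)
_hidden = torch.randn(4)
assert torch.allclose(_lm_head(_hidden), _emb.weight @ _hidden)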
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location='cpu')
    args = m2m_100['args']
    state_dict = m2m_100['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True, )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f''' but all the following weights are missing {missing}''' )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 222 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/vivit-b-16x2-kinetics400""": (
        """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''vivit'''

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
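# A quick worked example (illustrative) of the resulting sequence length:
# with the defaults above, each video yields
#   (num_frames / tubelet_size[0]) * (image_size / tubelet_size[1]) ** 2
#   = (32 / 2) * (224 / 16) ** 2 = 16 * 196 = 3136 tubelet tokens (plus [CLS]).
assert int((32 / 2) * (224 / 16) ** 2) == 3136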
| 355 |
"""simple docstring"""
import math
def is_prime(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            return next_prime(value + 1, **kwargs)
    return value
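# Usage sketch (illustrative): next_prime returns the first prime reached by
# counting up from factor * value.
assert next_prime(14) == 17            # 14 -> 15 -> 16 -> 17
assert next_prime(3, factor=5) == 17   # 3 * 5 = 15 -> 16 -> 17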
| 181 | 0 |
"""simple docstring"""
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1E-12, max_iterations=100):
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1E12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
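# A small usage sketch (illustrative, separate from the test below): the loop
# above implements b <- A b / ||A b||, with the Rayleigh quotient b* A b as
# the running estimate of the dominant eigenvalue.
if __name__ == "__main__":
    demo_matrix = np.array([[2.0, 0.0], [0.0, 1.0]])
    demo_value, demo_vector = power_iteration(demo_matrix, np.array([1.0, 1.0]))
    assert abs(demo_value - 2.0) < 1E-6  # dominant eigenvalue of diag(2, 1)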
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1E-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
    test_power_iteration()
| 81 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path

    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
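# Hedged usage sketch: fire exposes `convert` on the command line, so a typical
# call (file name and paths illustrative, not verified) would look like
#
#   python fp16_convert.py --src_path pytorch_model.bin --save_path model.fp16.bin
#
# or directly from Python:
#
#   convert("pytorch_model.bin", save_path="model.fp16.bin")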
| 236 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
"""simple docstring"""
__a =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__a =[(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        # (target names follow the `attention` scheme used in the rename table above)
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size :, :]
        state_dict[F'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    ignore_keys = [
        'module.fc.fc1.weight',
        'module.fc.fc1.bias',
        'module.fc.bn1.weight',
        'module.fc.bn1.bias',
        'module.fc.bn1.running_mean',
        'module.fc.bn1.running_var',
        'module.fc.bn1.num_batches_tracked',
        'module.fc.fc2.weight',
        'module.fc.fc2.bias',
        'module.fc.bn2.weight',
        'module.fc.bn2.bias',
        'module.fc.bn2.running_mean',
        'module.fc.bn2.running_var',
        'module.fc.bn2.num_batches_tracked',
        'module.fc.fc3.weight',
        'module.fc.fc3.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = 'datasets/huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['target_encoder']

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors='pt')

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(F'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    print(F'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 366 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-to-speech')
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 308 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    """Configuration class for the M-CTC-T speech recognition model."""

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`.")
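# Minimal usage sketch: the consistency check above fires when the kernel list
# and the declared layer count disagree (values here are illustrative only):
#
#   MCTCTConfig(num_conv_layers=1, conv_kernel=(7,))  # fine
#   MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))  # raises ValueError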
| 159 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of the arc subtended by ``angle`` degrees on a circle of ``radius``."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
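    # Sanity check: a 90 degree arc of a radius-10 circle is a quarter of the
    # circumference 2*pi*10 ~= 62.83, so the call above prints ~15.71.
    print(arc_length(120, 15))  # 120 degrees of a radius-15 circle -> ~31.42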
| 159 | 1 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the md5 hash of the whitespace-stripped content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration or test file."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
    if count_config > threshold or count_test > threshold:
        return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if the file contains none of the keywords that indicate executable Python code."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the `=` symbol fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
    if counter > minimum:
        return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the module-level tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
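# Quick illustration of the heuristics on a toy record (hedged sketch; the
# module-level `tokenizer` must exist before `char_token_ratio` is called):
#
#   sample = {"content": "def add(a, b):\n    return a + b\n"}
#   line_stats(sample)       # -> {"line_mean": 15.0, "line_max": 16}
#   has_no_keywords(sample)  # -> {"has_no_keywords": False} because of "def "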
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# (not sure this is the right place to save it)
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""") | 64 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
class _snake_case(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : List[str] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PIL.Image.BICUBIC , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : List[str] , ):
super().__init__(**UpperCAmelCase )
__lowerCamelCase : int = size if size is not None else {"height": 256, "width": 256}
__lowerCamelCase : str = get_size_dict(UpperCAmelCase )
__lowerCamelCase : Optional[int] = crop_size if crop_size is not None else {"height": 224, "width": 224}
__lowerCamelCase : Optional[Any] = get_size_dict(UpperCAmelCase , param_name="crop_size" )
__lowerCamelCase : Any = do_resize
__lowerCamelCase : str = size
__lowerCamelCase : str = resample
__lowerCamelCase : str = do_center_crop
__lowerCamelCase : List[str] = crop_size
__lowerCamelCase : Union[str, Any] = do_rescale
__lowerCamelCase : List[Any] = rescale_factor
__lowerCamelCase : Optional[Any] = do_normalize
__lowerCamelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCamelCase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PIL.Image.BICUBIC , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[Any] , ):
__lowerCamelCase : int = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
UpperCAmelCase , size=(size["height"], size["width"]) , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Tuple , ):
__lowerCamelCase : Optional[int] = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(UpperCAmelCase , size=(size["height"], size["width"]) , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[str] , ):
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Any , ):
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : int , ):
__lowerCamelCase : int = do_resize if do_resize is not None else self.do_resize
__lowerCamelCase : Tuple = resample if resample is not None else self.resample
__lowerCamelCase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCamelCase : str = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
__lowerCamelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
__lowerCamelCase : int = image_std if image_std is not None else self.image_std
__lowerCamelCase : Optional[int] = size if size is not None else self.size
__lowerCamelCase : Optional[Any] = get_size_dict(UpperCAmelCase )
__lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
__lowerCamelCase : Dict = get_size_dict(UpperCAmelCase , param_name="crop_size" )
__lowerCamelCase : Optional[Any] = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__lowerCamelCase : Optional[int] = [to_numpy_array(UpperCAmelCase ) for image in images]
if do_resize:
__lowerCamelCase : Optional[int] = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_center_crop:
__lowerCamelCase : int = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images]
if do_rescale:
__lowerCamelCase : List[str] = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
__lowerCamelCase : Optional[int] = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
__lowerCamelCase : Dict = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
__lowerCamelCase : List[Any] = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase ) | 64 | 1 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self, claim_vector, allocated_resources_table, maximum_claim_table, ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        """Sum the currently allocated resources for each resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]

    def __available_resources(self):
        """Available resources: claim vector minus the per-column allocation sums."""
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )

    def __need(self):
        """Remaining need of each process: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]

    def __need_index_manager(self):
        """Index control dictionary tracking the original index of each process."""
        return {self.__need().index(i ): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm over the remaining needs."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""" )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print('The process is in a safe state.\n' )
            else:
                print('System in unsafe state. Aborting...\n' )
                break

    def __pretty_data(self):
        """Properly align display of the algorithm's data."""
        print(' ' * 9 + 'Allocated Resource Table' )
        for item in self.__allocated_resources_table:
            print(
                f"""P{self.__allocated_resources_table.index(item ) + 1}"""
                + ' '.join(f"""{it:>8}""" for it in item )
                + '\n' )
        print(' ' * 9 + 'System Resource Table' )
        for item in self.__maximum_claim_table:
            print(
                f"""P{self.__maximum_claim_table.index(item ) + 1}"""
                + ' '.join(f"""{it:>8}""" for it in item )
                + '\n' )
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x ) for x in self.__claim_vector ) )
        print(
            'Initial Available Resources: '
            + ' '.join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
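# Example run using the test data defined at the top of this module:
#
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)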
| 275 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def lowercase__ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
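# Hedged example of building a small graph and extracting its MST edges with
# `prim` (vertex ids are 0-based, `connect` indices are 1-based):
#
#   vertices = [Vertex(n) for n in range(4)]   # ids "0".."3"
#   connect(vertices, 1, 2, 5)
#   connect(vertices, 2, 3, 4)
#   connect(vertices, 3, 4, 3)
#   connect(vertices, 1, 4, 10)
#   prim(vertices, vertices[0])  # expected: [(2, 1), (3, 2), (4, 3)]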
| 332 | 0 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    """Pairs a `.source` and a `.target` text file line by line."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self ) -> int:
return len(self.src_lens )
    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
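# Worked example for the squad-style metrics above:
#
#   normalize_answer("The  Cat!")             # -> "cat"
#   f1_score("the cat sat", "a cat slept")    # -> 0.5 (one shared token, P = R = 1/2)
#   exact_match_score("The cat.", "the cat")  # -> True after normalization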
| 164 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
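# With the lazy-module pattern above, heavy submodules are imported only on
# first attribute access, e.g. (hedged sketch):
#
#   from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderModel
#   # the torch-backed modeling file is loaded at this point, not at package import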
| 164 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """Configuration class for GPT-Neo models."""

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50_257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for attention module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to compute the largest divisor of `seq_length` below
    `window_size` and the resulting number of blocks, in an ONNX-exportable way."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
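# Worked example of the attention pattern expansion used above:
#
#   GPTNeoConfig.expand_attention_types_params([[["global", "local"], 2]])
#   # -> ["global", "local", "global", "local"]
#
# The default [[["global", "local"], 12]] therefore yields 24 entries,
# matching the default num_layers=24.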
| 224 |
"""simple docstring"""
def longest_distance(graph):
    """Print the number of vertices on the longest path through the DAG `graph`."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
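# For the adjacency list above the longest path visits five vertices
# (e.g. 0 -> 2 -> 5 -> 6 -> 7), so the call prints 5.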
| 224 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'])
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'])

        with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.word_shape_file , 'w' , encoding='utf-8') as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False)
        with open(self.word_pronunciation_file , 'w' , encoding='utf-8') as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
        tokens = tokenizer.tokenize('你好[SEP]你是谁')
        self.assertListEqual(tokens , ['你', '好', '[SEP]', '你', '是', '谁'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens) , [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens) , [5, 6, 2, 5, 7, 8])
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Union[str, Any] = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz') , ['ah', '\u535A', '\u63A8', 'zz'])
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[str] = RoCBertBasicTokenizer(do_lower_case=lowercase)
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ') , ['hello', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: List[Any] = RoCBertBasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hällo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['h\u00E9llo'])
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Optional[int] = RoCBertBasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: List[Any] = RoCBertBasicTokenizer(do_lower_case=lowercase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: List[Any] = RoCBertBasicTokenizer(do_lower_case=lowercase)
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: List[Any] = RoCBertBasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: List[str] = RoCBertBasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=lowercase , never_split=['[UNK]'])
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Dict = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
a__: Optional[int] = {}
for i, token in enumerate(lowercase):
a__: Any = i
a__: Dict = RoCBertWordpieceTokenizer(vocab=lowercase , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('unwanted running') , ['un', '##want', '##ed', 'runn', '##ing'])
self.assertListEqual(tokenizer.tokenize('unwantedX running') , ['[UNK]', 'runn', '##ing'])
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
self.assertTrue(_is_whitespace(' '))
self.assertTrue(_is_whitespace('\t'))
self.assertTrue(_is_whitespace('\r'))
self.assertTrue(_is_whitespace('\n'))
self.assertTrue(_is_whitespace('\u00A0'))
self.assertFalse(_is_whitespace('A'))
self.assertFalse(_is_whitespace('-'))
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
self.assertTrue(_is_control('\u0005'))
self.assertFalse(_is_control('A'))
self.assertFalse(_is_control(' '))
self.assertFalse(_is_control('\t'))
self.assertFalse(_is_control('\r'))
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
self.assertTrue(_is_punctuation('-'))
self.assertTrue(_is_punctuation('$'))
self.assertTrue(_is_punctuation('`'))
self.assertTrue(_is_punctuation('.'))
self.assertFalse(_is_punctuation('A'))
self.assertFalse(_is_punctuation(' '))
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: str = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowercase) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']])
if self.test_rust_tokenizer:
a__: Dict = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(lowercase) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']])
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
a__: str = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase)
a__: List[Any] = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
a__: Optional[Any] = tokenizer_r.encode_plus(
lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase , )
a__: str = tokenizer_r.do_lower_case if hasattr(lowercase , 'do_lower_case') else False
a__: List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'])
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: int = ['的', '人', '有']
a__: str = ''.join(lowercase)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
a__: Optional[int] = True
a__: Optional[Any] = self.tokenizer_class.from_pretrained(lowercase , **lowercase)
a__: List[str] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase)
a__: Union[str, Any] = tokenizer_p.encode(lowercase , add_special_tokens=lowercase)
a__: str = tokenizer_r.encode(lowercase , add_special_tokens=lowercase)
a__: Dict = tokenizer_r.convert_ids_to_tokens(lowercase)
a__: List[Any] = tokenizer_p.convert_ids_to_tokens(lowercase)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowercase , lowercase)
self.assertListEqual(lowercase , lowercase)
a__: Optional[int] = False
a__: str = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase)
a__: List[Any] = self.tokenizer_class.from_pretrained(lowercase , **lowercase)
a__: int = tokenizer_r.encode(lowercase , add_special_tokens=lowercase)
a__: Optional[int] = tokenizer_p.encode(lowercase , add_special_tokens=lowercase)
a__: Any = tokenizer_r.convert_ids_to_tokens(lowercase)
a__: Any = tokenizer_p.convert_ids_to_tokens(lowercase)
# it is expected that only the first Chinese character is not preceded by "##".
a__: Any = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(lowercase)
]
self.assertListEqual(lowercase , lowercase)
self.assertListEqual(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
a__: Any = tokenizer.encode('你好' , add_special_tokens=lowercase)
a__: List[Any] = tokenizer.encode('你是谁' , add_special_tokens=lowercase)
a__: Optional[int] = tokenizer.build_inputs_with_special_tokens(lowercase)
a__: Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: str = self.get_tokenizers(do_lower_case=lowercase)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
a__: List[Any] = '你好,你是谁'
a__: Optional[Any] = tokenizer.tokenize(lowercase)
a__: Any = tokenizer.convert_tokens_to_ids(lowercase)
a__: int = tokenizer.convert_tokens_to_shape_ids(lowercase)
a__: str = tokenizer.convert_tokens_to_pronunciation_ids(lowercase)
a__: Tuple = tokenizer.prepare_for_model(
lowercase , lowercase , lowercase , add_special_tokens=lowercase)
a__: List[str] = tokenizer.encode_plus(lowercase , add_special_tokens=lowercase)
self.assertEqual(lowercase , lowercase)
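# RoCBert couples every token with shape and pronunciation ids; a hedged usage
# sketch against the toy vocabulary files written in setUp:
#
#   tok = RoCBertTokenizer(vocab_file, word_shape_file, word_pronunciation_file)
#   tokens = tok.tokenize("你好")            # -> ["你", "好"]
#   tok.convert_tokens_to_ids(tokens)        # -> [5, 6]
#   tok.convert_tokens_to_shape_ids(tokens)  # -> [5, 6]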
| 203 | """simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '[UNK]',
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '[UNK]'}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self , **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts(self , tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('Hello' , 'World')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['token_type_ids'] , expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/deberta-base')

        text = tokenizer.encode('sequence builders' , add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build' , add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            'sequence builders' , add_special_tokens=True , add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=True , add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class)
for tokenizer_class in tokenizer_classes:
a__: int = tokenizer_class.from_pretrained('microsoft/deberta-base')
a__: List[str] = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
a__: Dict = tokenizer(lowercase , padding=lowercase)
a__: Union[str, Any] = [tokenizer.decode(lowercase , skip_special_tokens=lowercase) for seq in encoding['input_ids']]
# fmt: off
a__: Any = {
'input_ids': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
a__: str = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , lowercase)
for expected, decoded in zip(lowercase , lowercase):
self.assertEqual(lowercase , lowercase)
| 203 | 1 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class _lowerCamelCase( PretrainedConfig ):
    model_type = "encodec"

    def __init__( self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24_000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=1_28, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=10_24, codebook_dim=None, use_conv_shortcut=True, **kwargs, ):
        """simple docstring"""
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''')
        super().__init__(**kwargs)
    @property
    def chunk_length( self ) -> Optional[int]:
        """simple docstring"""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride( self ) -> Optional[int]:
        """simple docstring"""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate( self ) -> int:
        """simple docstring"""
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers( self ) -> int:
        """simple docstring"""
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
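

# Illustrative usage sketch (added; not part of the original file). The class above
# is named `_lowerCamelCase` in this dump (upstream it is `EncodecConfig`), so the
# sketch uses that name. With the default sampling_rate=24_000 and
# upsampling_ratios=[8, 5, 4, 2], the hop length is 8 * 5 * 4 * 2 = 320, hence
# frame_rate = ceil(24_000 / 320) = 75 frames per second.
def _encodec_config_usage_sketch() -> None:
    config = _lowerCamelCase(chunk_length_s=1.0, overlap=0.5)
    assert config.frame_rate == 75
    assert config.chunk_length == 24_000  # 1.0 s of audio at 24 kHz
    assert config.chunk_stride == 12_000  # (1.0 - overlap) * chunk_length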
| 21 |
from __future__ import annotations
def check_polygon(nums ) -> bool:
    if len(nums ) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
    if any(i <= 0 for i in nums ):
        raise ValueError('All values must be greater than 0' )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
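

# Quick illustrative check (added; not part of the original file): a 3-4-5 triangle
# satisfies the polygon inequality, while side lengths (1, 1, 3) do not.
def _check_polygon_usage_sketch() -> None:
    assert check_polygon([3, 4, 5]) is True
    assert check_polygon([1, 1, 3]) is False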
| 21 | 1 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling( self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 355 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = ClapTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
        text_encoder = ClapTextModelWithProjection(text_encoder_config )
        tokenizer = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=False , )
        vocoder = SpeechT5HifiGan(vocoder_config )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """vocoder""": vocoder,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A hammer hitting a wooden surface""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
        }
        return inputs
    def test_audioldm_ddim( self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = audioldm_pipe(**inputs )
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio ) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
        assert np.abs(audio_slice - expected_slice ).max() < 1e-2
    def test_audioldm_prompt_embeds( self ):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        # forward
        output = audioldm_pipe(**inputs )
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop("""prompt""" )]
        text_inputs = audioldm_pipe.tokenizer(
            prompt , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=True , return_tensors="""pt""" , )
        text_inputs = text_inputs["""input_ids"""].to(torch_device )
        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs , )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds , dim=-1 )
        inputs["""prompt_embeds"""] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs )
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2 ).max() < 1e-2
    def test_audioldm_negative_prompt_embeds( self ):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["""this is a negative prompt"""]
        inputs["""negative_prompt"""] = negative_prompt
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        # forward
        output = audioldm_pipe(**inputs )
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop("""prompt""" )]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=True , return_tensors="""pt""" , )
            text_inputs = text_inputs["""input_ids"""].to(torch_device )
            text_embeds = audioldm_pipe.text_encoder(
                text_inputs , )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds , dim=-1 )
            embeds.append(text_embeds )
        inputs["""prompt_embeds"""], inputs["""negative_prompt_embeds"""] = embeds
        # forward
        output = audioldm_pipe(**inputs )
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2 ).max() < 1e-2
    def test_audioldm_negative_prompt( self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = PNDMScheduler(skip_prk_steps=True )
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        negative_prompt = """egg cracking"""
        output = audioldm_pipe(**inputs , negative_prompt=negative_prompt )
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio ) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
        assert np.abs(audio_slice - expected_slice ).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt( self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = PNDMScheduler(skip_prk_steps=True )
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        prompt = """A hammer hitting a wooden surface"""
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt , num_inference_steps=2 ).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt , num_inference_steps=2 , num_waveforms_per_prompt=num_waveforms_per_prompt ).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=num_waveforms_per_prompt ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s( self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device )
        output = audioldm_pipe(audio_length_in_s=0.016 , **inputs )
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio ) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032 , **inputs )
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio ) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim( self ):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        prompt = ["""hey"""]
        output = audioldm_pipe(prompt , num_inference_steps=1 )
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config ).to(torch_device )
        output = audioldm_pipe(prompt , num_inference_steps=1 )
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False )

    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False )
@slow
class AudioLDMPipelineSlowTests ( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 8, 128, 16) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            """prompt""": """A hammer hitting a wooden surface""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 2.5,
        }
        return inputs
    def test_audioldm( self ):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        inputs["""num_inference_steps"""] = 25
        audio = audioldm_pipe(**inputs ).audios[0]
        assert audio.ndim == 1
        assert len(audio ) == 8_1920
        audio_slice = audio[7_7230:7_7240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
        max_diff = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 1e-2
    def test_audioldm_lms( self ):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        audio = audioldm_pipe(**inputs ).audios[0]
        assert audio.ndim == 1
        assert len(audio ) == 8_1920
        audio_slice = audio[2_7780:2_7790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
        max_diff = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 3e-2
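

# Illustrative end-to-end sketch (added; not part of the original tests). It mirrors
# the slow tests above with the same "cvssp/audioldm" checkpoint; the step count is
# an arbitrary illustrative choice.
def _audioldm_usage_sketch() -> None:
    pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
    audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
    # `audio` is a 1-D numpy waveform at the vocoder's sampling rate.
    assert audio.ndim == 1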
| 27 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    '''simple docstring'''
    headers = {
        '''Authorization''': F"""token {auth_token}""",
        '''Accept''': '''application/vnd.github.v3+json''',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"{key}: {value}")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
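

# Illustrative usage sketch (added; not part of the original script). The token
# below is a placeholder, not a real credential; in practice the script reads it
# from the USER_TOKEN environment variable as shown above.
def _fetch_github_info_usage_sketch() -> None:
    info = fetch_github_info("ghp_placeholder_token")
    print(info.get("login"))  # None (with an error payload) if the token is invalid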
| 17 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 336 | 0 |
from __future__ import annotations
from collections.abc import Generator
def UpperCAmelCase__ ( ) -> Generator[int, None, None]:
'''simple docstring'''
lowercase = {}
lowercase = 2
while True:
lowercase = factor_map.pop(lowerCAmelCase__ , lowerCAmelCase__ )
if factor:
lowercase = factor + prime
while x in factor_map:
x += factor
lowercase = factor
else:
lowercase = prime
yield prime
prime += 1
def UpperCAmelCase__ ( lowerCAmelCase__ :float = 1e10 ) -> int:
'''simple docstring'''
lowercase = sieve()
lowercase = 1
while True:
lowercase = next(lowerCAmelCase__ )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(lowerCAmelCase__ )
n += 2
if __name__ == "__main__":
print(solution())
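

# Illustrative sanity check (added; not part of the original solution): the
# incremental sieve yields the primes in order without a fixed upper bound.
def _sieve_usage_sketch() -> None:
    from itertools import islice

    assert list(islice(sieve(), 8)) == [2, 3, 5, 7, 11, 13, 17, 19]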
| 371 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline ( Pipeline ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )

    def _sanitize_parameters( self , top_k=None ):
        """simple docstring"""
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["""top_k"""] = top_k
        return {}, {}, postprocess_params

    def __call__( self , images , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )

    def preprocess( self , image ):
        """simple docstring"""
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs

    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess( self , model_outputs , top_k=5 ):
        """simple docstring"""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f'Unsupported framework: {self.framework}' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
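

# Illustrative usage sketch (added; not part of the original module). The pipeline
# is normally built through `transformers.pipeline`; the checkpoint and image URL
# below are illustrative choices, not requirements of the class above.
def _image_classification_usage_sketch() -> None:
    from transformers import pipeline

    classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
    predictions = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
    # `predictions` is a list of {"score": float, "label": str} dicts, best first.
    print(predictions)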
| 32 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem ( AbstractArchiveFileSystem ):
    root_marker = ''''''
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__( self , fo = "" , target_protocol = None , target_options = None , **kwargs ):
        super().__init__(self , **kwargs )
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode="rb" , protocol=target_protocol , compression=self.compression , client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs" , {} ),  # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split("::" )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex("." )]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol( cls , path ):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path ).lstrip("/" )

    def _get_dirs( self ):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat( self , path ):
        return self.file.open().read()

    def _open( self , path , mode = "rb" , block_size=None , autocommit=True , cache_options=None , **kwargs , ):
        path = self._strip_protocol(path )
        if mode != "rb":
            raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
        return self.file.open()


class Bz2FileSystem ( BaseCompressedFileFileSystem ):
    protocol = '''bz2'''
    compression = '''bz2'''
    extension = '''.bz2'''


class GzipFileSystem ( BaseCompressedFileFileSystem ):
    protocol = '''gzip'''
    compression = '''gzip'''
    extension = '''.gz'''


class Lz4FileSystem ( BaseCompressedFileFileSystem ):
    protocol = '''lz4'''
    compression = '''lz4'''
    extension = '''.lz4'''


class XzFileSystem ( BaseCompressedFileFileSystem ):
    protocol = '''xz'''
    compression = '''xz'''
    extension = '''.xz'''


class ZstdFileSystem ( BaseCompressedFileFileSystem ):
    protocol = '''zstd'''
    compression = '''zstd'''
    extension = '''.zst'''

    def __init__( self , fo , mode = "rb" , target_protocol = None , target_options = None , block_size = DEFAULT_BLOCK_SIZE , **kwargs , ):
        super().__init__(
            fo=fo , mode=mode , target_protocol=target_protocol , target_options=target_options , block_size=block_size , **kwargs , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__( self , file_ ):
                self._file = file_

            def __enter__( self ):
                self._file.__enter__()
                return self

            def __exit__( self , *args , **kwargs ):
                self._file.__exit__(*args , **kwargs )

            def __iter__( self ):
                return iter(self._file )

            def __next__( self ):
                return next(self._file )

            def __getattr__( self , attr ):
                return getattr(self._file , attr )

        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )

        self.file.__enter__ = fixed_enter
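

# Illustrative usage sketch (added; not part of the original module). Once these
# classes are registered with fsspec (the datasets library does this at import
# time), a compressed file can be read transparently; the path below is a
# placeholder.
def _compression_filesystem_usage_sketch() -> None:
    fs = GzipFileSystem(fo="data/train.jsonl.gz")
    # The filesystem exposes a single logical file named after the uncompressed name.
    with fs.open(fs.uncompressed_name) as f:
        print(f.readline())  # first line of the decompressed stream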
| 231 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
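

# Illustrative usage sketch (added; not part of the original file): the _LazyModule
# above defers the heavy backend imports until an attribute is actually accessed.
def _lazy_import_usage_sketch() -> None:
    # Attribute access on the lazy module triggers the real submodule import.
    from transformers.models.speech_to_text import Speech2TextConfig

    print(Speech2TextConfig().model_type)  # "speech_to_text"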
| 231 | 1 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str] ) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"""#.*""" , """""" , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULES_SUPPORTING_METADATA = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
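

# Illustrative usage sketch (added; not part of the original module): resolving the
# packaged builder module and default kwargs from a data file extension, mirroring
# what `load_dataset` does for local files.
def _extension_to_module_usage_sketch() -> None:
    module_name, default_builder_kwargs = _EXTENSION_TO_MODULE[".tsv"]
    assert module_name == "csv" and default_builder_kwargs == {"sep": "\t"}
    assert ".jsonl" in _MODULE_TO_EXTENSIONS["json"]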
| 358 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds , labels ) -> float:
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
    data_dir: str = field(metadata={"""help""": """Should contain the data files for the task."""} )
    max_seq_length: int = field(
        default=1_28 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
        if trainer.is_world_master():
            with open(output_eval_file , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""" , key , value )
                    writer.write("""%s = %s\n""" % (key, value) )
                results.update(result )
return results
def _mp_fn(index ) -> None:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
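

# Illustrative command line (added; not part of the original script). The script
# filename and data path are assumptions; the flags map to the dataclasses above.
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag \
#       --max_seq_length 80 \
#       --per_device_train_batch_size 16 \
#       --output_dir ./swag_out \
#       --do_train --do_eval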
| 262 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data ) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])
def xgboost(features , target ) -> XGBClassifier:
    """simple docstring"""
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main() -> None:
    """simple docstring"""
    iris = load_iris()
    features , targets = data_handling(iris )
    x_train , x_test , y_train , y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris['''target_names''']
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap='''Blues''' , normalize='''true''' , )
    plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 142 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict , old , new ) -> None:
    """simple docstring"""
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict ) -> None:
    """simple docstring"""
    prefix = ''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[F"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[F"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[F"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""decoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[F"""decoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[F"""decoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[F"""decoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[F"""decoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[F"""decoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
        in_proj_bias_cross_attn = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[F"""decoder.layers.{i}.encoder_attn.q_proj.weight"""] = in_proj_weight_cross_attn[:256, :]
        state_dict[F"""decoder.layers.{i}.encoder_attn.q_proj.bias"""] = in_proj_bias_cross_attn[:256]
        state_dict[F"""decoder.layers.{i}.encoder_attn.k_proj.weight"""] = in_proj_weight_cross_attn[256:512, :]
        state_dict[F"""decoder.layers.{i}.encoder_attn.k_proj.bias"""] = in_proj_bias_cross_attn[256:512]
        state_dict[F"""decoder.layers.{i}.encoder_attn.v_proj.weight"""] = in_proj_weight_cross_attn[-256:, :]
        state_dict[F"""decoder.layers.{i}.encoder_attn.v_proj.bias"""] = in_proj_bias_cross_attn[-256:]
def resize(image , checkpoint_url ):
    """simple docstring"""
    width , height = image.size
    current_max_size = max(width , height )
    target_max_size = 800 if 'detection' in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize(image ):
    """simple docstring"""
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub ) -> None:
    """simple docstring"""
    logger.info('Converting model...' )
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: 'table', 1: 'table rotated'}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1_000 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=filename )
    image = Image.open(file_path ).convert('RGB' )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
        expected_boxes = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
        expected_boxes = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...' )
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
image_processor.push_to_hub(A__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
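

# Illustrative invocation (added; not part of the original script). The script
# filename is an assumption; the checkpoint URL is one of the two choices declared
# in the argument parser above.
#
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection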
| 28 | 0 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    '''simple docstring'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray , dimensions: int ) -> np.ndarray:
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _ , eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , centered_data )
        logging.info("Principal Component Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def linear_discriminant_analysis(features: np.ndarray , labels: np.ndarray , classes: int , dimensions: int ) -> np.ndarray:
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _ , eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix , _ , _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod() | 360 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
        # decode the image latents with the VQ-VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ ) | 31 | 0 |
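# A readable sketch of the PCA routine in the row above; the row's identifiers are style-mangled, so
# names such as `centered_data` and `filtered_eigenvectors` below are illustrative choices of ours,
# not taken from the row itself.
import numpy as np
def pca_sketch(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project `features` (one sample per column) onto the top `dimensions` principal axes."""
    if not features.any():
        raise AssertionError("Dataset empty")
    data_mean = features.mean(axis=1)
    # center the dataset so the covariance is taken around the mean
    centered_data = features - data_mean.reshape(-1, 1)
    covariance = centered_data @ centered_data.T / features.shape[1]
    _, eigenvectors = np.linalg.eigh(covariance)
    # eigh returns eigenvalues in ascending order: reverse, then keep the leading columns
    filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
    return filtered_eigenvectors.T @ features
# usage: project 3-D points (columns) onto their 2 leading principal components
assert pca_sketch(np.array([[1.0, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]]), 2).shape == (2, 5)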
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase : str = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 13 | '''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
def update_area_of_max_square(UpperCAmelCase , UpperCAmelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase__ : int = update_area_of_max_square(UpperCAmelCase , col + 1 )
lowercase__ : Any = update_area_of_max_square(row + 1 , col + 1 )
lowercase__ : str = update_area_of_max_square(row + 1 , UpperCAmelCase )
if mat[row][col]:
lowercase__ : List[Any] = 1 + min([right, diagonal, down] )
lowercase__ : List[Any] = max(largest_square_area[0] , UpperCAmelCase )
return sub_problem_sol
else:
return 0
lowercase__ : Dict = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
def update_area_of_max_square_using_dp_array(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase__ : int = update_area_of_max_square_using_dp_array(UpperCAmelCase , col + 1 , UpperCAmelCase )
lowercase__ : Optional[int] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , UpperCAmelCase )
lowercase__ : Any = update_area_of_max_square_using_dp_array(row + 1 , UpperCAmelCase , UpperCAmelCase )
if mat[row][col]:
lowercase__ : Optional[int] = 1 + min([right, diagonal, down] )
lowercase__ : Any = max(largest_square_area[0] , UpperCAmelCase )
lowercase__ : int = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase__ : Any = [0]
lowercase__ : List[Any] = [[-1] * cols for _ in range(UpperCAmelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , UpperCAmelCase )
return largest_square_area[0]
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowercase__ : Optional[int] = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase__ : str = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase__ : str = dp_array[row][col + 1]
lowercase__ : Optional[Any] = dp_array[row + 1][col + 1]
lowercase__ : str = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase__ : Dict = 1 + min(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase__ : str = max(dp_array[row][col] , UpperCAmelCase )
else:
lowercase__ : Any = 0
return largest_square_area
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowercase__ : List[str] = [0] * (cols + 1)
lowercase__ : str = [0] * (cols + 1)
lowercase__ : Tuple = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase__ : List[Any] = current_row[col + 1]
lowercase__ : Any = next_row[col + 1]
lowercase__ : Optional[Any] = next_row[col]
if mat[row][col] == 1:
lowercase__ : List[str] = 1 + min(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase__ : List[str] = max(current_row[col] , UpperCAmelCase )
else:
lowercase__ : int = 0
lowercase__ : int = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 198 | 0 |
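# A minimal, readable reconstruction of the bottom-up DP in the row above
# (`largest_square_area_in_matrix_bottom_up`); the function and variable names here are ours.
# Recurrence on a 1-cell: dp[r][c] = 1 + min(right, diagonal, down).
def largest_square_side(rows: int, cols: int, mat: list) -> int:
    # one extra row/column of zeros so the bottom and right borders need no special case
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            if mat[row][col] == 1:
                dp[row][col] = 1 + min(dp[row][col + 1], dp[row + 1][col + 1], dp[row + 1][col])
                best = max(best, dp[row][col])
    return best
assert largest_square_side(2, 2, [[1, 1], [1, 1]]) == 2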
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_A = {
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 359 |
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
# Initialise PyTorch model
lowerCAmelCase__ : int = TaConfig.from_json_file(__UpperCAmelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase__ : Optional[int] = TaForConditionalGeneration(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 212 | 0 |
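# The conversion script above is the standard TF-checkpoint -> PyTorch flow for T5 (the row mangles
# digits, so `TaConfig` stands in for `T5Config`). A hedged programmatic equivalent with the real
# transformers names is sketched below; the three paths are placeholders, and running it requires
# TensorFlow plus an actual checkpoint on disk.
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
def convert_t5(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str) -> None:
    config = T5Config.from_json_file(config_file)  # architecture definition
    model = T5ForConditionalGeneration(config)  # randomly initialised PyTorch weights
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)  # copy the TF variables in
    model.save_pretrained(pytorch_dump_path)  # writes the PyTorch weights and config.json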
from math import factorial
A__ : str = {str(d): factorial(d) for d in range(10)}
def UpperCamelCase( __UpperCamelCase : int ):
return sum(DIGIT_FACTORIAL[d] for d in str(__UpperCamelCase ) )
def UpperCamelCase( ):
lowerCAmelCase_ : str = 7 * factorial(9 ) + 1
return sum(i for i in range(3 ,__UpperCamelCase ) if sum_of_digit_factorial(__UpperCamelCase ) == i )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 103 | """simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
def __init__( self , a , a=1_2 , a=7 , a=True , a=True , a=True , a=9_9 , a=3_2 , a=3_2 , a=2 , a=4 , a=3_7 , a=0.1 , a=0.1 , a=5_1_2 , a=0.02 , a=0 , a=None , ) -> Union[str, Any]:
lowercase__ : Any = parent
lowercase__ : str = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : Union[str, Any] = is_training
lowercase__ : List[str] = use_input_mask
lowercase__ : int = use_labels
lowercase__ : List[Any] = vocab_size
lowercase__ : str = hidden_size
lowercase__ : int = projection_dim
lowercase__ : Optional[int] = num_hidden_layers
lowercase__ : Any = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = dropout
lowercase__ : Optional[int] = attention_dropout
lowercase__ : Optional[int] = max_position_embeddings
lowercase__ : str = initializer_range
lowercase__ : Tuple = scope
lowercase__ : int = bos_token_id
def _UpperCAmelCase ( self ) -> Optional[Any]:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : int = None
if self.use_input_mask:
lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowercase__ : int = input_mask.numpy()
lowercase__ , lowercase__ : Tuple = input_mask.shape
lowercase__ : List[str] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(a ):
lowercase__ : Dict = 1
lowercase__ : Union[str, Any] = 0
lowercase__ : Tuple = self.get_config()
return config, input_ids, tf.convert_to_tensor(a )
def _UpperCAmelCase ( self ) -> List[Any]:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _UpperCAmelCase ( self , a , a , a ) -> Any:
lowercase__ : List[Any] = TFBlipTextModel(config=a )
lowercase__ : Optional[int] = model(a , attention_mask=a , training=a )
lowercase__ : List[str] = model(a , training=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : Any = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _a , unittest.TestCase):
lowerCamelCase__ : Dict = (TFBlipTextModel,) if is_tf_available() else ()
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : List[str] = False
lowerCamelCase__ : Any = False
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Optional[int] = BlipTextModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=a , hidden_size=3_7 )
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _UpperCAmelCase ( self ) -> str:
pass
@slow
def _UpperCAmelCase ( self ) -> int:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Any = TFBlipTextModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCAmelCase ( self , a=True ) -> List[str]:
super().test_pt_tf_model_equivalence(allow_missing_keys=a )
| 77 | 0 |
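# A readable reconstruction of the digit-factorial row above (Project Euler 34); names are ours.
# 7 * 9! + 1 is a safe search bound because an 8-digit number cannot reach its own digit-factorial
# sum: 8 * 9! = 2_903_040 < 10_000_000.
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def curious_numbers_total() -> int:
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
assert curious_numbers_total() == 40730  # 145 + 40585, the only two such numbers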
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : List[str] = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Optional[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 0 |
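# Both init files above follow transformers' lazy-import pattern: public names are declared in an
# `_import_structure` dict and only resolved on first attribute access. A small standalone sketch of
# the same idea (our own simplified stand-in, not transformers' internal `_LazyModule`):
import importlib
import types
class LazyModule(types.ModuleType):
    """Resolve `package.submodule.attr` lazily, on first access."""
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(submodule, attr)
# e.g. LazyModule("mypkg", {"models": ["Model"]}) imports mypkg.models only when Model is first
# touched (hypothetical package layout, for illustration).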
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
lowerCamelCase = logging.get_logger(__name__)
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = ["""input_features""", """attention_mask"""]
def __init__( self : Tuple , _lowerCAmelCase : Optional[Any]=8_0 , _lowerCAmelCase : List[str]=1_6_0_0_0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : List[Any]=1_0 , _lowerCAmelCase : Tuple=2_5 , _lowerCAmelCase : Union[str, Any]="hamming_window" , _lowerCAmelCase : int=3_2768.0 , _lowerCAmelCase : Optional[Any]=0.97 , _lowerCAmelCase : Tuple=1.0 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : List[str]=False , **_lowerCAmelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(feature_size=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , padding_value=_lowerCAmelCase , **_lowerCAmelCase)
__lowercase =feature_size
__lowercase =sampling_rate
__lowercase =padding_value
__lowercase =hop_length
__lowercase =win_length
__lowercase =frame_signal_scale
__lowercase =preemphasis_coeff
__lowercase =mel_floor
__lowercase =normalize_means
__lowercase =normalize_vars
__lowercase =win_function
__lowercase =return_attention_mask
__lowercase =win_length * sampling_rate // 1_0_0_0
__lowercase =hop_length * sampling_rate // 1_0_0_0
__lowercase =optimal_fft_length(self.sample_size)
__lowercase =(self.n_fft // 2) + 1
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : np.array):
'''simple docstring'''
if self.win_function == "hamming_window":
__lowercase =window_function(window_length=self.sample_size , name=self.win_function , periodic=_lowerCAmelCase)
else:
__lowercase =window_function(window_length=self.sample_size , name=self.win_function)
__lowercase =mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
__lowercase =spectrogram(
one_waveform * self.frame_signal_scale , window=_lowerCAmelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=_lowerCAmelCase , preemphasis=self.preemphasis_coeff , mel_filters=_lowerCAmelCase , mel_floor=self.mel_floor , log_mel='log' , )
return msfc_features.T
def __lowerCamelCase ( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int):
'''simple docstring'''
if self.normalize_means:
__lowercase =x[:input_length].mean(axis=0)
__lowercase =np.subtract(_lowerCAmelCase , _lowerCAmelCase)
if self.normalize_vars:
__lowercase =x[:input_length].std(axis=0)
__lowercase =np.divide(_lowerCAmelCase , _lowerCAmelCase)
if input_length < x.shape[0]:
__lowercase =padding_value
# make sure array is in float32
__lowercase =x.astype(np.floataa)
return x
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : List[np.ndarray] , _lowerCAmelCase : Optional[np.ndarray] = None):
'''simple docstring'''
__lowercase =attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(_lowerCAmelCase , _lowerCAmelCase , self.padding_value) for x, n in zip(_lowerCAmelCase , _lowerCAmelCase)]
def __call__( self : Any , _lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , _lowerCAmelCase : Optional[int] = None , **_lowerCAmelCase : Dict , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""")
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
__lowercase =isinstance(_lowerCAmelCase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
__lowercase =is_batched_numpy or (
isinstance(_lowerCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__lowercase =[np.asarray(_lowerCAmelCase , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(_lowerCAmelCase , np.ndarray):
__lowercase =np.asarray(_lowerCAmelCase , dtype=np.floataa)
elif isinstance(_lowerCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
__lowercase =raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
__lowercase =[raw_speech]
# extract fbank features
__lowercase =[self._extract_mfsc_features(_lowerCAmelCase) for one_waveform in raw_speech]
# convert into correct format for padding
__lowercase =BatchFeature({'input_features': features})
__lowercase =self.pad(
_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , )
# make sure list is in array format
__lowercase =padded_inputs.get('input_features')
if isinstance(input_features[0] , _lowerCAmelCase):
__lowercase =[np.asarray(_lowerCAmelCase , dtype=np.floataa) for feature in input_features]
__lowercase =padded_inputs.get('attention_mask')
if attention_mask is not None:
__lowercase =[np.asarray(_lowerCAmelCase , dtype=np.intaa) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
__lowercase =(
np.array(_lowerCAmelCase , dtype=np.intaa)
if self._get_padding_strategies(_lowerCAmelCase , max_length=_lowerCAmelCase) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
__lowercase =self.normalize(
padded_inputs['input_features'] , attention_mask=_lowerCAmelCase)
if return_tensors is not None:
__lowercase =padded_inputs.convert_to_tensors(_lowerCAmelCase)
return padded_inputs
| 166 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """blenderbot-small"""
lowerCAmelCase__ = ["""past_key_values"""]
lowerCAmelCase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Tuple , _lowerCAmelCase : Any=5_0_2_6_5 , _lowerCAmelCase : str=5_1_2 , _lowerCAmelCase : List[Any]=8 , _lowerCAmelCase : Tuple=2_0_4_8 , _lowerCAmelCase : str=1_6 , _lowerCAmelCase : Optional[int]=8 , _lowerCAmelCase : str=2_0_4_8 , _lowerCAmelCase : Dict=1_6 , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : str=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Tuple="gelu" , _lowerCAmelCase : int=5_1_2 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Dict=0.0 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : Optional[int]=1 , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : str=0 , _lowerCAmelCase : Dict=1 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Any=2 , **_lowerCAmelCase : List[Any] , ):
'''simple docstring'''
__lowercase =vocab_size
__lowercase =max_position_embeddings
__lowercase =d_model
__lowercase =encoder_ffn_dim
__lowercase =encoder_layers
__lowercase =encoder_attention_heads
__lowercase =decoder_ffn_dim
__lowercase =decoder_layers
__lowercase =decoder_attention_heads
__lowercase =dropout
__lowercase =attention_dropout
__lowercase =activation_dropout
__lowercase =activation_function
__lowercase =init_std
__lowercase =encoder_layerdrop
__lowercase =decoder_layerdrop
__lowercase =use_cache
__lowercase =encoder_layers
__lowercase =scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
class _UpperCamelCase ( A ):
'''simple docstring'''
@property
def __lowerCamelCase ( self : str):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__lowercase =OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
])
if self.use_past:
__lowercase ={0: 'batch'}
__lowercase ={0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase ={0: 'batch', 1: 'decoder_sequence'}
__lowercase ={0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction='inputs')
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase =OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
])
if self.use_past:
__lowercase , __lowercase =self.num_layers
for i in range(_lowerCAmelCase):
__lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
__lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowercase =OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
])
return common_inputs
@property
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__lowercase =super().outputs
else:
__lowercase =super(_lowerCAmelCase , self).outputs
if self.use_past:
__lowercase , __lowercase =self.num_layers
for i in range(_lowerCAmelCase):
__lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
__lowercase ={0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
# Generate decoder inputs
__lowercase =seq_length if not self.use_past else 1
__lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
__lowercase ={f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
__lowercase =dict(**_lowerCAmelCase , **_lowerCAmelCase)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
__lowercase , __lowercase =common_inputs['input_ids'].shape
__lowercase =common_inputs['decoder_input_ids'].shape[1]
__lowercase , __lowercase =self.num_attention_heads
__lowercase =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase =decoder_seq_length + 3
__lowercase =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase =torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(_lowerCAmelCase , _lowerCAmelCase)] , dim=1)
__lowercase =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase , __lowercase =self.num_layers
__lowercase =min(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =max(_lowerCAmelCase , _lowerCAmelCase) - min_num_layers
__lowercase ='encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(_lowerCAmelCase):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase),
torch.zeros(_lowerCAmelCase),
torch.zeros(_lowerCAmelCase),
torch.zeros(_lowerCAmelCase),
))
# TODO: test this.
__lowercase =encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(_lowerCAmelCase , _lowerCAmelCase):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase)))
return common_inputs
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
__lowercase , __lowercase =common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase =seqlen + 2
__lowercase , __lowercase =self.num_layers
__lowercase , __lowercase =self.num_attention_heads
__lowercase =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase =common_inputs['attention_mask'].dtype
__lowercase =torch.cat(
[common_inputs['attention_mask'], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase)] , dim=1)
__lowercase =[
(torch.zeros(_lowerCAmelCase), torch.zeros(_lowerCAmelCase)) for _ in range(_lowerCAmelCase)
]
return common_inputs
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
__lowercase =compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase =tokenizer.num_special_tokens_to_add(_lowerCAmelCase)
__lowercase =compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase)
# Generate dummy inputs according to compute batch and sequence
__lowercase =[' '.join([tokenizer.unk_token]) * seq_length] * batch_size
__lowercase =dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase))
return common_inputs
def __lowerCamelCase ( self : Optional[int] , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__lowercase =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)
elif self.task == "causal-lm":
__lowercase =self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)
else:
__lowercase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase)
return common_inputs
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__lowercase =super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
else:
__lowercase =super(_lowerCAmelCase , self)._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
| 166 | 1 |
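# The two rows above pair a speech feature extractor (pad, then per-utterance mean/variance
# normalization) with an ONNX config that builds dummy encoder/decoder inputs. A tiny numpy sketch
# of the normalization step, with names of our own choosing:
import numpy as np
def normalize_utterance(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    """Mean/variance-normalize the first `input_length` frames, keep the rest at `padding_value`."""
    x = x.astype(np.float32).copy()
    x -= x[:input_length].mean(axis=0)  # normalize_means
    x /= x[:input_length].std(axis=0)  # normalize_vars
    x[input_length:] = padding_value  # padded frames stay at the padding value
    return x
out = normalize_utterance(np.vstack([np.random.randn(8, 80), np.zeros((4, 80))]), input_length=8)
assert np.allclose(out[8:], 0.0)  # the 4 padded frames are untouched by the statistics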
import torch
from diffusers import DiffusionPipeline
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] ) ->Dict:
"""simple docstring"""
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
def __call__( self : List[Any] ) ->int:
"""simple docstring"""
a = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
a = 1
a = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
a = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample
a = scheduler_output - scheduler_output + torch.ones_like(__UpperCAmelCase )
return result
| 26 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = torch.device("cpu")
def _a ( ) -> Union[str, Any]:
a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a = Image.open(requests.get(a , stream=a ).raw )
return im
def _a ( a :Dict ) -> Tuple:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def _a ( a :int , a :Any , a :Union[str, Any] ) -> int:
a = dct.pop(a )
a = val
def _a ( a :Any ) -> Dict:
a = []
for k in state_dict.keys():
a = k
if ".pwconv" in k:
a = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
if ".dwconv" in k:
a = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
if ".Proj." in k:
a = k_new.replace('''.Proj.''' , '''.proj.''' )
if "patch_embed" in k_new:
a = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
a = k_new.split('''.''' )
if ls[2].isdigit():
a = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
else:
a = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def _a ( a :List[Any] , a :Tuple , a :List[str] ) -> Union[str, Any]:
a = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a = 1_000
a = '''huggingface/label-files'''
a = '''imagenet-1k-id2label.json'''
a = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
a = {int(a ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a = [3, 3, 6, 4]
a = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a = [3, 3, 9, 6]
a = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a = [4, 3, 10, 5]
a = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a = [4, 4, 12, 6]
a = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
a = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' , check_hash=a )
else:
a = torch.load(a , map_location='''cpu''' )
a = checkpoint
a = create_rename_keys(a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(a , a , a )
# load HuggingFace model
a = SwiftFormerForImageClassification(a ).eval()
hf_model.load_state_dict(a )
# prepare test inputs
a = prepare_img()
a = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
a = processor(images=a , return_tensors='''pt''' )
# compare outputs from both models
a = get_expected_output(a )
a = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , a , atol=1e-3 )
Path(a ).mkdir(exist_ok=a )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
UpperCAmelCase__ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 26 | 1 |
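# A readable reconstruction of the tiny custom pipeline in the row above (the kind diffusers' tests
# use to exercise DiffusionPipeline plumbing): one denoising step, then an all-ones tensor of the
# same shape is returned. Names are ours.
import torch
from diffusers import DiffusionPipeline
class OneStepPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    def __call__(self):
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size)
        )
        timestep = 1
        noise_pred = self.unet(sample, timestep).sample
        prev_sample = self.scheduler.step(noise_pred, timestep, sample).prev_sample
        # cancel the scheduler output and return ones of the matching shape, as the row does
        return prev_sample - prev_sample + torch.ones_like(prev_sample)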
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 60 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case__ : List[Any] = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class snake_case_:
__UpperCamelCase = PegasusConfig
__UpperCamelCase = {}
__UpperCamelCase = '''gelu'''
def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any=1_3 , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Optional[Any]=9_9 , UpperCamelCase_ : Any=3_2 , UpperCamelCase_ : List[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : str=3_7 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=2_0 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : List[str]=1 , UpperCamelCase_ : Any=0 , ):
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Any = seq_length
lowerCAmelCase : Dict = is_training
lowerCAmelCase : Optional[int] = use_labels
lowerCAmelCase : Union[str, Any] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Optional[Any] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : List[Any] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : str = eos_token_id
lowerCAmelCase : List[Any] = pad_token_id
lowerCAmelCase : List[str] = bos_token_id
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowerCAmelCase : Union[str, Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase : List[str] = np.concatenate([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCAmelCase : Dict = prepare_pegasus_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict ):
lowerCAmelCase : Any = 2_0
lowerCAmelCase : Any = model_class_name(UpperCamelCase_ )
lowerCAmelCase : List[str] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : Optional[Any] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : Any = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCAmelCase : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : int = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : List[Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict ):
lowerCAmelCase : Dict = 2_0
lowerCAmelCase : Union[str, Any] = model_class_name(UpperCamelCase_ )
lowerCAmelCase : Any = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : List[str] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : List[Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ )
lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def _snake_case ( _snake_case : Tuple , _snake_case : Dict , _snake_case : Dict , _snake_case : Optional[Any]=None , _snake_case : Dict=None , ):
if attention_mask is None:
lowerCAmelCase : Tuple = np.not_equal(_snake_case , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowerCAmelCase : Dict = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__UpperCamelCase = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = FlaxPegasusModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase, lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : str = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
@jax.jit
def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Tuple ):
return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : Tuple = encode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Dict = encode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Optional[int] = model_class(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase : Any = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ):
return model.decode(
decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : Optional[Any] = decode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Any = decode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase__ ( self : str ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : int = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=UpperCamelCase_ )
lowerCAmelCase : List[Any] = np.ones((1, 1) )
lowerCAmelCase : str = model(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Any = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
lowerCAmelCase : List[Any] = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
lowerCAmelCase : int = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
lowerCAmelCase : str = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
lowerCAmelCase : Optional[Any] = tokenizer(UpperCamelCase_ , return_tensors='''np''' , truncation=UpperCamelCase_ , max_length=5_1_2 , padding=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model.generate(**UpperCamelCase_ , num_beams=2 ).sequences
lowerCAmelCase : Tuple = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
assert tgt_text == decoded
| 60 | 1 |
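# The slow integration test above already demonstrates end-to-end Flax Pegasus summarization;
# extracted here as a plain usage sketch (illustrative: the checkpoint downloads on first run).
from transformers import FlaxPegasusForConditionalGeneration, PegasusTokenizer
model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
inputs = tokenizer(
    ["PG&E scheduled blackouts in response to forecasts for high winds amid dry conditions."],
    return_tensors="np", truncation=True, max_length=512, padding=True,
)
summary_ids = model.generate(**inputs, num_beams=2).sequences
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))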
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    '''simple docstring'''
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    '''simple docstring'''
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    '''simple docstring'''
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset", )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    '''simple docstring'''
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True)

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 |
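# --- Illustration (not part of the dataset row above) ---
# The conversion script above repeatedly slices a fused attention projection of
# shape (3*dim, dim) into query/key/value thirds before renaming the keys. A
# minimal, self-contained sketch of that pattern (tensor names are hypothetical):
import torch

dim = 4
in_proj_weight = torch.randn(3 * dim, dim)  # fused q/k/v projection
q_proj = in_proj_weight[:dim, :]            # first third -> query
k_proj = in_proj_weight[dim : dim * 2, :]   # middle third -> key
v_proj = in_proj_weight[-dim:, :]           # last third -> value
assert q_proj.shape == k_proj.shape == v_proj.shape == (dim, dim)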
from math import ceil
def solution(n: int = 1001) -> int:
    '''simple docstring'''
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 28 | 1 |
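# --- Illustration ---
# Why the loop above works: in an n x n number spiral, ring i (i >= 1) has its
# four diagonal corners at (2i+1)^2, (2i+1)^2 - 2i, (2i+1)^2 - 4i and
# (2i+1)^2 - 6i, whose sum is 4*(2i+1)^2 - 12i = 4*odd**2 - 6*even.
# A hedged cross-check (helper name is hypothetical):
def spiral_diagonals(n: int) -> int:
    total = 1
    for i in range(1, (n + 1) // 2):
        total += 4 * (2 * i + 1) ** 2 - 12 * i
    return total

assert spiral_diagonals(5) == 101  # known value for a 5 x 5 spiral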
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """simple docstring"""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """simple docstring"""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """simple docstring"""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
| 289 | """simple docstring"""
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 289 | 1 |
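# --- Illustration ---
# The helpers above rely on params=locals() to forward their arguments as query
# parameters, which only works while the parameter names match the API's query
# keys. A more explicit, equivalent sketch (the appid value is a placeholder):
import requests

def current_weather_explicit(q: str = "Chicago", appid: str = "YOUR_APPID") -> dict:
    url = "https://api.openweathermap.org/data/2.5/weather"
    return requests.get(url, params={"q": q, "appid": appid}, timeout=10).json()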
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        '''simple docstring'''
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()}, {k: v.tolist() for k, v in expected_map_nested_sn_int.items()}, )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()}, {k: v.tolist() for k, v in expected_map_nested_sn_int.items()}, )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        '''simple docstring'''
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        '''simple docstring'''
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool") as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        '''simple docstring'''
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    @require_torch
    def test_torch(self):
        '''simple docstring'''
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_numpy(self):
        '''simple docstring'''
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize('input_data' , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 321 | """simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 321 | 1 |
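# --- Illustration ---
# The memoised variant above can also be written with functools.lru_cache; a
# hedged sketch equivalent to combination_sum_iv_dp_array (counts ordered
# compositions of the target from the given parts):
from functools import lru_cache

def combination_sum_iv_cached(array: list[int], target: int) -> int:
    @lru_cache(maxsize=None)
    def count(remaining: int) -> int:
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        return sum(count(remaining - item) for item in array)

    return count(target)

assert combination_sum_iv_cached([1, 2, 5], 5) == 9  # same answer as the demo above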
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        '''simple docstring'''
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ) -> BatchEncoding:
        '''simple docstring'''
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 85 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
# n - 1=d*(2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 48 | 0 |
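# --- Illustration ---
# The test above is probabilistic. For inputs below roughly 3.3 * 10**24 a
# fixed witness set is known to make Miller-Rabin deterministic; a
# self-contained sketch using Python's built-in pow for modular exponentiation:
def is_prime_deterministic(n: int) -> bool:
    if n < 2:
        return False
    for p in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37):
        if n % p == 0:
            return n == p
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    for a in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37):
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(s - 1):
            x = x * x % n
            if x == n - 1:
                break
        else:
            return False
    return True

assert is_prime_deterministic(2**61 - 1)  # a Mersenne prime
assert not is_prime_deterministic(561)    # Carmichael number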
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        '''simple docstring'''
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs, )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        '''simple docstring'''
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 189 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        '''simple docstring'''
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        '''simple docstring'''
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
| 189 | 1 |
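# --- Illustration ---
# The nested-text branch of __call__ above pads every sample to the batch-wide
# maximum number of queries before tokenizing; the padding pattern in isolation:
text = [["a photo of a cat", "a photo of a dog"], ["a photo of a bird"]]
max_num_queries = max(len(t) for t in text)
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
assert padded == [["a photo of a cat", "a photo of a dog"], ["a photo of a bird", " "]]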
"""simple docstring"""
from random import randint, random
def construct_highway(
    number_of_cells: int, frequency: int, initial_speed: int, random_frequency: bool = False, random_speed: bool = False, max_speed: int = 5, ):
    """simple docstring"""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now: list, car_index: int) -> int:
    """simple docstring"""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(__UpperCamelCase , -1 )
def update(highway_now: list, probability: float, max_speed: int) -> list:
    """simple docstring"""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cell before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
return next_highway
def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """simple docstring"""
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)

        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 266 |
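# --- Illustration ---
# Usage sketch for the Nagel-Schreckenberg-style model above (names refer to
# the functions in that sample; output varies run to run because drivers
# brake at random):
highway = construct_highway(number_of_cells=20, frequency=4, initial_speed=1)
history = simulate(highway, number_of_update=5, probability=0.1, max_speed=5)
for state in history:
    print("".join("." if cell == -1 else str(cell) for cell in state))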
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''

    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('junnyu/roformer_chinese_small', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 266 | 1 |
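# --- Illustration ---
# RoFormer's distinguishing feature is the rotary position embedding: each pair
# of feature dimensions is rotated by a position-dependent angle instead of
# adding a learned position vector. A hedged NumPy sketch of one common pairing
# convention (interleaved even/odd dimensions; implementations differ):
import numpy as np

def rotary_embed(x: np.ndarray) -> np.ndarray:
    seq_len, dim = x.shape
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = np.outer(np.arange(seq_len), inv_freq)  # (seq_len, dim // 2)
    cos, sin = np.cos(angles), np.sin(angles)
    x1, x2 = x[:, 0::2], x[:, 1::2]
    out = np.empty_like(x)
    out[:, 0::2] = x1 * cos - x2 * sin
    out[:, 1::2] = x1 * sin + x2 * cos
    return out

print(rotary_embed(np.ones((6, 8))).shape)  # (6, 8)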
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.", FutureWarning, )
| 237 | '''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    mode = input('Encrypt/Decrypt [e/d]: ')

    if mode.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')
def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == 'encrypt':
                num += LETTERS.find(key[key_index])
            elif mode == 'decrypt':
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return ''.join(translated)
if __name__ == "__main__":
main()
| 237 | 1 |
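# --- Illustration ---
# Round-trip sketch for the Vigenere helpers above: decryption undoes
# encryption whenever the same key is used, and non-letters pass through.
ciphertext = encrypt_message("LEMON", "Attack at dawn!")
assert decrypt_message("LEMON", ciphertext) == "Attack at dawn!"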
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split('.')[0].split(lora_prefix_text_encoder + '_')[-1].split('_')
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('.')[0].split(lora_prefix_unet + '_')[-1].split('_')
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('lora_down', 'lora_up'))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace('lora_up', 'lora_down'))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 325 |
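# --- Illustration ---
# The core merge step in the script above is W <- W + alpha * (up @ down),
# i.e. a low-rank update folded into the base weight. In isolation:
import torch

d_out, d_in, rank, alpha = 8, 8, 2, 0.75
weight = torch.zeros(d_out, d_in)        # stand-in for a base layer weight
lora_down = torch.randn(rank, d_in)      # "lora_down" factor
lora_up = torch.randn(d_out, rank)       # "lora_up" factor
weight += alpha * lora_up @ lora_down    # rank-2 update, same shape as weight
assert weight.shape == (d_out, d_in)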
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    """simple docstring"""

    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 40 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import choice
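# Quickselect: returns the kth smallest element of a list in expected O(n) time
# by partitioning around a random pivot and recursing into a single partition.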
def random_pivot(lst):
    """Choose a random element of the list as the pivot."""
    return choice(lst)
def kth_number(lst, k):
    """Return the kth smallest element of lst (1-indexed)."""
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 154 | """simple docstring"""
class UpperCamelCase__:
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = None
A__ = None
A__ = graph
self._normalize_graph(__UpperCAmelCase ,__UpperCAmelCase )
A__ = len(__UpperCAmelCase )
A__ = None
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
        if isinstance(sources ,int ):
            A__ = [sources]
        if isinstance(sinks ,int ):
            A__ = [sinks]
if len(__UpperCAmelCase ) == 0 or len(__UpperCAmelCase ) == 0:
return
A__ = sources[0]
A__ = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__UpperCAmelCase ) > 1 or len(__UpperCAmelCase ) > 1:
A__ = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
A__ = len(self.graph ) + 1
for room in self.graph:
room.insert(0 ,0 )
self.graph.insert(0 ,[0] * size )
for i in sources:
A__ = max_input_flow
A__ = 0
A__ = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
A__ = max_input_flow
A__ = size - 1
def snake_case__ ( self ) -> Optional[int]:
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def snake_case__ ( self ,__UpperCAmelCase ) -> Any:
A__ = algorithm(self )
class UpperCamelCase__:
def __init__( self ,__UpperCAmelCase ) -> Optional[int]:
A__ = flow_network
A__ = flow_network.verticesCount
A__ = flow_network.sourceIndex
A__ = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
A__ = flow_network.graph
A__ = False
def snake_case__ ( self ) -> Optional[Any]:
if not self.executed:
self._algorithm()
A__ = True
def snake_case__ ( self ) -> Tuple:
pass
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ) -> List[Any]:
super().__init__(__UpperCAmelCase )
# use this to save your result
A__ = -1
def snake_case__ ( self ) -> Any:
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ) -> int:
super().__init__(__UpperCAmelCase )
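        # Push-relabel state: preflow[u][v] is the flow pushed along edge (u, v),
        # heights[u] is the vertex label, and excesses[u] is inflow not yet forwarded.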
A__ = [[0] * self.verticies_count for i in range(self.verticies_count )]
A__ = [0] * self.verticies_count
A__ = [0] * self.verticies_count
def snake_case__ ( self ) -> Optional[Any]:
A__ = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
A__ = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
A__ = 0
while i < len(__UpperCAmelCase ):
A__ = vertices_list[i]
A__ = self.heights[vertex_index]
self.process_vertex(__UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 ,vertices_list.pop(__UpperCAmelCase ) )
A__ = 0
else:
i += 1
A__ = sum(self.preflow[self.source_index] )
def snake_case__ ( self ,__UpperCAmelCase ) -> List[Any]:
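        # Discharge: keep pushing excess to lower admissible neighbours; when a full
        # scan finds no admissible edge, relabel this vertex and try again.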
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__UpperCAmelCase ,__UpperCAmelCase )
self.relabel(__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]:
A__ = min(
self.excesses[from_index] ,self.graph[from_index][to_index] - self.preflow[from_index][to_index] ,)
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
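    # Relabel: raise the vertex's height to one above its lowest residual neighbour
    # so that a push operation becomes admissible again.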
def snake_case__ ( self ,__UpperCAmelCase ) -> Any:
A__ = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
A__ = self.heights[to_index]
if min_height is not None:
A__ = min_height + 1
if __name__ == "__main__":
__lowerCamelCase = [0]
__lowerCamelCase = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCamelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCamelCase = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCamelCase = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
| 154 | 1 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCamelCase : Optional[Any] = 25_6047
__lowerCamelCase : List[str] = 25_6145
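# Language-code token ids used throughout these tests: 25_6047 is eng_Latn
# (EN_CODE) and 25_6145 is ron_Latn (RO_CODE).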
@require_sentencepiece
@require_tokenizers
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :Dict = NllbTokenizer
_UpperCAmelCase :List[str] = NllbTokenizerFast
_UpperCAmelCase :Any = True
_UpperCAmelCase :str = True
_UpperCAmelCase :str = {}
def __UpperCamelCase( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase : str = NllbTokenizer(A_ , keep_accents=A_ )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = NllbTokenizer(A_ , keep_accents=A_ )
UpperCamelCase : List[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(A_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCamelCase : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
UpperCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(A_ , **A_ )
UpperCamelCase : Dict = tempfile.mkdtemp()
UpperCamelCase : List[Any] = tokenizer_r.save_pretrained(A_ )
UpperCamelCase : int = tokenizer_p.save_pretrained(A_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
UpperCamelCase : int = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(A_ , A_ )
# Checks everything loads correctly in the same way
UpperCamelCase : Any = tokenizer_r.from_pretrained(A_ )
UpperCamelCase : Tuple = tokenizer_p.from_pretrained(A_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A_ , A_ ) )
shutil.rmtree(A_ )
# Save tokenizer rust, legacy_format=True
UpperCamelCase : str = tempfile.mkdtemp()
UpperCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(A_ , legacy_format=A_ )
UpperCamelCase : Dict = tokenizer_p.save_pretrained(A_ )
# Checks it save with the same files
self.assertSequenceEqual(A_ , A_ )
# Checks everything loads correctly in the same way
UpperCamelCase : Dict = tokenizer_r.from_pretrained(A_ )
UpperCamelCase : Dict = tokenizer_p.from_pretrained(A_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A_ , A_ ) )
shutil.rmtree(A_ )
# Save tokenizer rust, legacy_format=False
UpperCamelCase : int = tempfile.mkdtemp()
UpperCamelCase : Optional[Any] = tokenizer_r.save_pretrained(A_ , legacy_format=A_ )
UpperCamelCase : List[Any] = tokenizer_p.save_pretrained(A_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCamelCase : Optional[int] = tokenizer_r.from_pretrained(A_ )
UpperCamelCase : Dict = tokenizer_p.from_pretrained(A_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A_ , A_ ) )
shutil.rmtree(A_ )
@require_torch
def __UpperCamelCase( self ):
'''simple docstring'''
if not self.test_seqaseq:
return
UpperCamelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
UpperCamelCase : int = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
UpperCamelCase : Dict = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
UpperCamelCase : List[str] = tokenizer.prepare_seqaseq_batch(
src_texts=A_ , tgt_texts=A_ , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
UpperCamelCase : Union[str, Any] = tokenizer.prepare_seqaseq_batch(
A_ , tgt_texts=A_ , max_length=3 , return_tensors="pt" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
UpperCamelCase : str = tokenizer.prepare_seqaseq_batch(
src_texts=A_ , max_length=3 , max_target_length=10 , return_tensors="pt" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("decoder_input_ids" , A_ )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase : Dict = [AddedToken("<special>" , lstrip=A_ )]
UpperCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
A_ , additional_special_tokens=A_ , **A_ )
UpperCamelCase : Union[str, Any] = tokenizer_r.encode("Hey this is a <special> token" )
UpperCamelCase : List[Any] = tokenizer_r.encode("<special>" , add_special_tokens=A_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
A_ , additional_special_tokens=A_ , **A_ , )
UpperCamelCase : Tuple = self.tokenizer_class.from_pretrained(
A_ , additional_special_tokens=A_ , **A_ )
UpperCamelCase : List[str] = tokenizer_p.encode("Hey this is a <special> token" )
UpperCamelCase : Optional[int] = tokenizer_cr.encode("Hey this is a <special> token" )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , A_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
_UpperCAmelCase :List[Any] = 'facebook/nllb-200-distilled-600M'
_UpperCAmelCase :Optional[Any] = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
_UpperCAmelCase :Optional[int] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
_UpperCAmelCase :Any = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def __UpperCamelCase( cls ):
'''simple docstring'''
UpperCamelCase : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" )
UpperCamelCase : int = 1
return cls
def __UpperCamelCase( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 25_6057 )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
self.assertIn(A_ , self.tokenizer.all_special_ids )
# fmt: off
UpperCamelCase : str = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
UpperCamelCase : Optional[Any] = self.tokenizer.decode(A_ , skip_special_tokens=A_ )
UpperCamelCase : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A_ )
self.assertEqual(A_ , A_ )
self.assertNotIn(self.tokenizer.eos_token , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , A_ )
UpperCamelCase : str = 10
UpperCamelCase : Dict = self.tokenizer(A_ , max_length=A_ , truncation=A_ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , A_ )
self.assertEqual(len(A_ ) , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_6203, 3] )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = tempfile.mkdtemp()
UpperCamelCase : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A_ )
UpperCamelCase : List[str] = NllbTokenizer.from_pretrained(A_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A_ )
@require_torch
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A_ , truncation=A_ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
UpperCamelCase : Optional[Any] = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
self.assertIsInstance(A_ , A_ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
UpperCamelCase : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A_ )
self.assertEqual(A_ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.tokenizer(self.src_text , padding=A_ , truncation=A_ , max_length=3 , return_tensors="pt" )
UpperCamelCase : List[Any] = self.tokenizer(
text_target=self.tgt_text , padding=A_ , truncation=A_ , max_length=10 , return_tensors="pt" )
UpperCamelCase : str = targets["input_ids"]
UpperCamelCase : List[Any] = shift_tokens_right(
A_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
nested_simplify(A_ ) , {
                # eng_Latn, A, test, EOS
"input_ids": [[25_6047, 70, 7356, 2]],
"attention_mask": [[1, 1, 1, 1]],
                # fra_Latn
"forced_bos_token_id": 25_6057,
} , )
@require_torch
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = True
UpperCamelCase : List[Any] = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
UpperCamelCase : Optional[Any] = False
UpperCamelCase : List[Any] = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
| 52 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :List[str] = KandinskyInpaintPipeline
_UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_UpperCAmelCase :Dict = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_UpperCAmelCase :Optional[int] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCAmelCase :int = False
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 32
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 32
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 100
@property
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
UpperCamelCase : Optional[int] = MultilingualCLIP(A_ )
UpperCamelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ )
return model
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.dummy_text_encoder
UpperCamelCase : str = self.dummy_tokenizer
UpperCamelCase : List[Any] = self.dummy_unet
UpperCamelCase : Optional[Any] = self.dummy_movq
UpperCamelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
UpperCamelCase : Optional[Any] = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCamelCase( self , A_ , A_=0 ):
'''simple docstring'''
UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase : List[Any] = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) )
# create mask
UpperCamelCase : str = np.ones((64, 64) , dtype=np.floataa )
UpperCamelCase : str = 0
if str(A_ ).startswith("mps" ):
UpperCamelCase : int = torch.manual_seed(A_ )
else:
UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase : Union[str, Any] = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = "cpu"
UpperCamelCase : Tuple = self.get_dummy_components()
UpperCamelCase : str = self.pipeline_class(**A_ )
UpperCamelCase : Tuple = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) )
UpperCamelCase : List[Any] = output.images
UpperCamelCase : List[Any] = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
UpperCamelCase : Union[str, Any] = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __UpperCamelCase( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
UpperCamelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
UpperCamelCase : str = 0
UpperCamelCase : List[Any] = "a hat"
UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
UpperCamelCase : Optional[Any] = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase , UpperCamelCase : Optional[Any] = pipe_prior(
A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase : Dict = pipeline(
A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
UpperCamelCase : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_ , A_ )
| 52 | 1 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# The held-out split is unused below; names follow the usual sklearn convention.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
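# PolynomialFeatures(degree=4) expands each position level x into
# [1, x, x**2, x**3, x**4]; the "polynomial" model is then an ordinary linear
# regression fitted on those expanded features.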
def viz_polymonial():
    plt.scatter(X, y, color="""red""")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="""blue""")
    plt.title("""Truth or Bluff (Linear Regression)""")
    plt.xlabel("""Position level""")
    plt.ylabel("""Salary""")
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 366 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase="None" , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ) -> Optional[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = relative_attention
_snake_case = position_biased_input
_snake_case = pos_att_type
_snake_case = scope
def lowercase (self ) -> List[Any]:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_input_mask:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = ids_tensor([self.batch_size] , self.num_choices )
_snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase (self ) -> int:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
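    # The relative_attention, position_biased_input, and pos_att_type settings above
    # configure DeBERTa's disentangled attention over content and relative positions.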
def lowercase (self ) -> int:
_snake_case = self.get_config()
_snake_case = 300
return config
def lowercase (self , UpperCAmelCase ) -> Dict:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
_snake_case = DebertaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_snake_case = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )[0]
_snake_case = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )[0]
_snake_case = model(UpperCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
_snake_case = DebertaForMaskedLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_snake_case = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
_snake_case = self.num_labels
_snake_case = DebertaForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_snake_case = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
_snake_case = self.num_labels
_snake_case = DebertaForTokenClassification(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_snake_case = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
_snake_case = DebertaForQuestionAnswering(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_snake_case = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase (self ) -> Tuple:
_snake_case = self.prepare_config_and_inputs()
(
(
_snake_case
), (
_snake_case
), (
_snake_case
), (
_snake_case
), (
_snake_case
), (
_snake_case
), (
_snake_case
),
) = config_and_inputs
_snake_case = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowercase (self ) -> Any:
_snake_case = DebertaModelTester(self )
_snake_case = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def lowercase (self ) -> List[str]:
self.config_tester.run_common_tests()
def lowercase (self ) -> Tuple:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*UpperCAmelCase )
def lowercase (self ) -> str:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCAmelCase )
def lowercase (self ) -> int:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCAmelCase )
def lowercase (self ) -> Any:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*UpperCAmelCase )
def lowercase (self ) -> str:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*UpperCAmelCase )
@slow
def lowercase (self ) -> Tuple:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = DebertaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def lowercase (self ) -> Any:
pass
@slow
def lowercase (self ) -> Dict:
_snake_case = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
_snake_case = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_snake_case = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_snake_case = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
# compare the actual values for a slice.
_snake_case = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 270 | 0 |