| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (87–55.2k chars) | int64 (0–349) | string (135–49.1k chars) | int64 (0–349) | int64 (0–1) |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
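# Usage sketch (not part of the original module; the checkpoint name and data_dir are
# assumptions for illustration):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#     train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#     print(len(train_dataset), train_dataset.get_labels())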
| 101 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
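# Usage sketch (illustrative shapes and cutoffs, not part of the original module): the
# adaptive softmax scores the shortlist of frequent tokens with the full projection and
# routes rarer tokens through smaller per-cluster projections (d_embed // div_val**i).
#
#     crit = ProjectedAdaptiveLogSoftmax(n_token=10000, d_embed=512, d_proj=512, cutoffs=[1000, 5000], div_val=2)
#     hidden = torch.randn(8, 16, 512)            # [bsz, seq_len, d_proj]
#     labels = torch.randint(0, 10000, (8, 16))   # [bsz, seq_len]
#     nll = crit(hidden, labels)                  # per-token negative log-likelihood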
| 317 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_gpt_neox"] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 102 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    r"""
    Constructs a TVLT processor which wraps a TVLT image processor and a TVLT feature extractor into a single
    processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
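# Usage sketch (the inputs are assumptions, not from the original module): the processor
# forwards images to the image processor and audio to the feature extractor, then merges
# both output dicts into one model-ready dict.
#
#     import numpy as np
#
#     processor = TvltProcessor(image_processor, feature_extractor)  # assumed instances
#     inputs = processor(images=video_frames, audio=np.zeros(44100), sampling_rate=44100)
#     print(inputs.keys())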
| 317 | 0 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """Returns the index of the first Fibonacci term to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
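# Worked example (derivable from the functions above): fibonacci(12) == 144 is the first
# Fibonacci number with three digits, so fibonacci_digits_index(3) == 12; solution(1000)
# answers Project Euler problem 25.
#
#     assert fibonacci(12) == 144
#     assert fibonacci_digits_index(3) == 12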
| 103 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
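# Usage sketch outside of the test harness (the checkpoint id is an assumption):
#
#     from transformers import MobileNetV1ImageProcessor
#
#     image_processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     pixel_values = image_processor(images=pil_image, return_tensors="pt").pixel_values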
| 317 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
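# Usage sketch (the hub id is an assumption, not from the original module): run the
# pipeline end to end for unconditional generation.
#
#     pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]
#     image.save("sample.png")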
| 104 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
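# Usage sketch (the task name is an assumption, not from the original module): build a
# config and inspect the dynamic ONNX input axes it declares.
#
#     config = XLMRobertaConfig(vocab_size=30522, hidden_size=256, num_hidden_layers=4)
#     onnx_config = XLMRobertaOnnxConfig(config, task="sequence-classification")
#     print(onnx_config.inputs)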
| 317 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
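# Invocation sketch (all flag values are assumptions, not from the original script):
#
#     python run_multiple_choice.py \
#         --task_name swag --model_name_or_path bert-base-uncased --data_dir ./swag_data \
#         --output_dir ./swag_out --max_seq_length 80 --do_train --do_eval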
| 105 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
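# Usage sketch (file names are assumptions): this builder is what backs
# datasets.load_dataset("csv", ...); any CsvConfig field can be passed through as a kwarg.
#
#     import datasets
#
#     ds = datasets.load_dataset("csv", data_files={"train": "train.csv"}, sep=";")
#     print(ds["train"][0])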
| 317 | 0 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : list[list[str]] = [[] for _ in range(A_ )]
lowerCAmelCase__ : Union[str, Any] = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1 or len(A_ ) <= key:
return input_string
for position, character in enumerate(A_ ):
lowerCAmelCase__ : Union[str, Any] = position % (lowest * 2) # puts it in bounds
lowerCAmelCase__ : Optional[int] = min(A_ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(A_ )
lowerCAmelCase__ : str = [''''''.join(A_ ) for row in temp_grid]
lowerCAmelCase__ : int = ''''''.join(A_ )
return output_string
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : Dict = []
lowerCAmelCase__ : int = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1:
return input_string
lowerCAmelCase__ : list[list[str]] = [[] for _ in range(A_ )] # generates template
for position in range(len(A_ ) ):
lowerCAmelCase__ : int = position % (lowest * 2) # puts it in bounds
lowerCAmelCase__ : Dict = min(A_ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('''*''' )
lowerCAmelCase__ : Dict = 0
for row in temp_grid: # fills in the characters
lowerCAmelCase__ : List[Any] = input_string[counter : counter + len(A_ )]
grid.append(list(A_ ) )
counter += len(A_ )
lowerCAmelCase__ : Tuple = '''''' # reads as zigzag
for position in range(len(A_ ) ):
lowerCAmelCase__ : Tuple = position % (lowest * 2) # puts it in bounds
lowerCAmelCase__ : Optional[int] = min(A_ , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : Dict = {}
for key_guess in range(1 , len(A_ ) ): # tries every key
lowerCAmelCase__ : List[str] = decrypt(A_ , A_ )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
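# Worked example (follows from the functions above): with key=4 the plaintext is written
# along a 4-rail zigzag and read off row by row; decrypt inverts the permutation, and
# bruteforce recovers it by trying every key.
#
#     ciphertext = encrypt("Hello, World!", 4)
#     assert decrypt(ciphertext, 4) == "Hello, World!"
#     assert bruteforce(ciphertext)[4] == "Hello, World!"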
| 106 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Returns all the rotations of the provided string."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Returns the Burrows-Wheeler transform of the provided string, together with the
    index of the original string in the sorted rotation list."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverses the Burrows-Wheeler transform and returns the original string."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than" " len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
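# Worked example (derivable from the functions above): for s = "banana" the sorted
# rotations are abanan, anaban, ananab, banana, nabana, nanaba, so
#
#     bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3}
#     reverse_bwt("nnbaaa", 3) == "banana"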
| 317 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token states, weighted by the attention mask
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
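# Usage sketch (the tokenizer id is an assumption; the model below is randomly initialised
# for illustration rather than loaded from a trained M-CLIP checkpoint):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
#     model = MultilingualCLIP(MCLIPConfig())
#     batch = tokenizer(["a photo of a cat"], return_tensors="pt")
#     text_embedding, hidden_states = model(batch["input_ids"], batch["attention_mask"])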
| 107 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
a__ = logging.get_logger(__name__)
# General docstring
a__ = """RegNetConfig"""
# Base docstring
a__ = """facebook/regnet-y-040"""
a__ = [1, 10_88, 7, 7]
# Image classification docstring
a__ = """facebook/regnet-y-040"""
a__ = """tabby, tabby cat"""
a__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 1 , lowerCAmelCase : Optional[str] = "relu" , ) -> List[str]:
"""simple docstring"""
super().__init__()
_snake_case : int = nn.Convad(
lowerCAmelCase , lowerCAmelCase , kernel_size=lowerCAmelCase , stride=lowerCAmelCase , padding=kernel_size // 2 , groups=lowerCAmelCase , bias=lowerCAmelCase , )
_snake_case : List[Any] = nn.BatchNormad(lowerCAmelCase)
_snake_case : Tuple = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
_snake_case : Tuple = self.convolution(lowerCAmelCase)
_snake_case : Any = self.normalization(lowerCAmelCase)
_snake_case : List[Any] = self.activation(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : RegNetConfig) -> List[str]:
"""simple docstring"""
super().__init__()
_snake_case : Dict = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act)
_snake_case : Dict = config.num_channels
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
_snake_case : str = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""")
_snake_case : Any = self.embedder(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 2) -> Optional[Any]:
"""simple docstring"""
super().__init__()
_snake_case : Optional[Any] = nn.Convad(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , stride=lowerCAmelCase , bias=lowerCAmelCase)
_snake_case : Tuple = nn.BatchNormad(lowerCAmelCase)
def UpperCamelCase_ ( self : int , lowerCAmelCase : Tensor) -> Tensor:
"""simple docstring"""
_snake_case : Optional[Any] = self.convolution(lowerCAmelCase)
_snake_case : Optional[int] = self.normalization(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : int , lowerCAmelCase : int) -> Any:
"""simple docstring"""
super().__init__()
_snake_case : Optional[Any] = nn.AdaptiveAvgPoolad((1, 1))
_snake_case : Optional[Any] = nn.Sequential(
nn.Convad(lowerCAmelCase , lowerCAmelCase , kernel_size=1) , nn.ReLU() , nn.Convad(lowerCAmelCase , lowerCAmelCase , kernel_size=1) , nn.Sigmoid() , )
def UpperCamelCase_ ( self : Any , lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
_snake_case : Dict = self.pooler(lowerCAmelCase)
_snake_case : List[str] = self.attention(lowerCAmelCase)
_snake_case : str = hidden_state * attention
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 1) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
_snake_case : Optional[int] = in_channels != out_channels or stride != 1
_snake_case : Optional[Any] = max(1 , out_channels // config.groups_width)
_snake_case : Union[str, Any] = (
RegNetShortCut(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase) if should_apply_shortcut else nn.Identity()
)
_snake_case : Tuple = nn.Sequential(
RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , groups=lowerCAmelCase , activation=config.hidden_act) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=lowerCAmelCase) , )
_snake_case : Dict = ACTaFN[config.hidden_act]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
a__ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
a__ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Standard HF head logic: infer `problem_type` once from num_labels and the
            # label dtype, then pick MSE, cross-entropy, or BCE-with-logits accordingly.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 317 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('''.''')
| 108 |
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    return max(prefix_function(input_string))
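# Quick sanity check (editor's addition, values computed by hand): for "aabcdaabc"
# the prefix function is [0, 1, 0, 0, 0, 1, 2, 3, 4], so
# longest_prefix("aabcdaabc") == 4.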
if __name__ == "__main__":
import doctest
doctest.testmod()
| 317 | 0 |
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
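# Minimal check (editor's addition; the graphs are hypothetical adjacency lists):
# check_cycle({0: [1], 1: [2], 2: [0]}) -> True   (edge 2 -> 0 closes a cycle)
# check_cycle({0: [1], 1: [2], 2: []})  -> False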
if __name__ == "__main__":
from doctest import testmod
testmod()
| 109 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
a__ = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
a__ = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = " Hello world! cécé herlolip"
mnli_rename_keys = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens)[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
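# Example invocation (editor's addition; the script filename and output path are
# placeholders, not taken from the source):
#   python convert_bart_checkpoint.py bart.large.cnn ./bart-large-cnn-hf \
#       --hf_config facebook/bart-large-cnn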
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 317 | 0 |
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 285 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a__ = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 317 | 0 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
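# Hand-checked example (editor's addition): the semiprimes below 30 are
# 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, so solution(30) == 10.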
if __name__ == "__main__":
print(f"""{solution() = }""")
| 190 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
a__ = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 317 | 0 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
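# Hand-checked example (editor's addition): the first Fibonacci number with three
# digits is 144 at index 12, so fibonacci_digits_index(3) == 12 and solution(3) == 12.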
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 253 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
a__ = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 317 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )
    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 232 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
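# Note on the final assertion (editor's reading): `>` on sets is Python's strict
# superset test, so every public name exposed by HashMap must already exist on the
# built-in dict, i.e. the custom class adds no new public API.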
| 317 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
__lowerCAmelCase : Tuple ="▁"
__lowerCAmelCase : Optional[Any] ={"vocab_file": "sentencepiece.bpe.model"}
__lowerCAmelCase : List[Any] ={
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
__lowerCAmelCase : Optional[int] ={
"facebook/mbart-large-50-one-to-many-mmt": 1024,
}
# fmt: off
__lowerCAmelCase : int =["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__lowercase = VOCAB_FILES_NAMES
__lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase = PRETRAINED_VOCAB_FILES_MAP
__lowercase = ["""input_ids""", """attention_mask"""]
__lowercase = []
__lowercase = []
def __init__( self :Any , lowercase_ :Dict , lowercase_ :Dict=None , lowercase_ :Optional[int]=None , lowercase_ :str="</s>" , lowercase_ :List[Any]="</s>" , lowercase_ :Any="<s>" , lowercase_ :Union[str, Any]="<unk>" , lowercase_ :Any="<pad>" , lowercase_ :Optional[int]="<mask>" , lowercase_ :Optional[Dict[str, Any]] = None , **lowercase_ :Optional[Any] , )-> None:
A__ = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
A__ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowercase_ , tgt_lang=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase_ ) )
A__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
A__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A__ = 1
A__ = len(self.sp_model )
A__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase_ )
}
A__ = {v: k for k, v in self.lang_code_to_id.items()}
A__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
A__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
A__ = src_lang if src_lang is not None else """en_XX"""
A__ = self.lang_code_to_id[self._src_lang]
A__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase_ ( self :Optional[int] )-> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCAmelCase_ ( self :List[Any] )-> str:
return self._src_lang
@src_lang.setter
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :str )-> None:
A__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self :List[Any] )-> Dict:
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__( self :Any , lowercase_ :Dict )-> None:
A__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self :int )-> Dict:
A__ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase_ ( self :Optional[Any] , lowercase_ :str )-> List[str]:
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def UpperCAmelCase_ ( self :int , lowercase_ :str )-> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A__ = self.sp_model.PieceToId(lowercase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase_ ( self :Dict , lowercase_ :int )-> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :Any )-> List[str]:
A__ = []
A__ = """"""
A__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_ ) + token
A__ = True
A__ = []
else:
current_sub_tokens.append(lowercase_ )
A__ = False
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def UpperCAmelCase_ ( self :int , lowercase_ :str , lowercase_ :Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
A__ = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , "wb" ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None , lowercase_ :bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
A__ = [1] * len(self.prefix_tokens )
A__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase_ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase_ )) + ([0] * len(lowercase_ )) + suffix_ones
def UpperCAmelCase_ ( self :Any , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None )-> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase_ ( self :int , lowercase_ :str , lowercase_ :str , lowercase_ :Optional[str] , lowercase_ :Optional[str] , **lowercase_ :Optional[Any] )-> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
A__ = src_lang
A__ = self(lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , **lowercase_ )
A__ = self.convert_tokens_to_ids(lowercase_ )
A__ = tgt_lang_id
return inputs
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :List[str] , lowercase_ :str = "en_XX" , lowercase_ :Optional[List[str]] = None , lowercase_ :str = "ro_RO" , **lowercase_ :Dict , )-> BatchEncoding:
A__ = src_lang
A__ = tgt_lang
return super().prepare_seqaseq_batch(lowercase_ , lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self :Dict )-> Any:
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase_ ( self :int )-> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :str )-> None:
A__ = self.lang_code_to_id[src_lang]
A__ = [self.cur_lang_code_id]
A__ = [self.eos_token_id]
def UpperCAmelCase_ ( self :Dict , lowercase_ :str )-> None:
A__ = self.lang_code_to_id[tgt_lang]
A__ = [self.cur_lang_code_id]
A__ = [self.eos_token_id]
| 237 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@require_torch
def UpperCamelCase_ ( self : str) -> str:
"""simple docstring"""
_snake_case : Optional[int] = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
_snake_case : Any = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
_snake_case : Dict = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
_snake_case : Dict = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(lowerCAmelCase)
BertModel.from_pretrained(lowerCAmelCase)
BertTokenizer.from_pretrained(lowerCAmelCase)
pipeline(task="""fill-mask""" , model=lowerCAmelCase)
# baseline - just load from_pretrained with normal network
_snake_case : int = [sys.executable, """-c""", """\n""".join([load, run, mock])]
# should succeed
_snake_case : Dict = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case : Union[str, Any] = """1"""
_snake_case : Tuple = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn("""success""" , result.stdout.decode())
@require_torch
def UpperCamelCase_ ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_snake_case : List[Any] = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
_snake_case : List[str] = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
_snake_case : int = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
_snake_case : int = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(lowerCAmelCase)
BertModel.from_pretrained(lowerCAmelCase)
BertTokenizer.from_pretrained(lowerCAmelCase)
pipeline(task="""fill-mask""" , model=lowerCAmelCase)
# baseline - just load from_pretrained with normal network
_snake_case : str = [sys.executable, """-c""", """\n""".join([load, run, mock])]
# should succeed
_snake_case : int = self.get_env()
_snake_case : List[str] = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn("""success""" , result.stdout.decode())
@require_torch
def UpperCamelCase_ ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Union[str, Any] = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
_snake_case : List[Any] = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
_snake_case : Optional[int] = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
_snake_case : int = [sys.executable, """-c""", """\n""".join([load, run])]
# should succeed
_snake_case : Any = self.get_env()
_snake_case : Dict = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn("""success""" , result.stdout.decode())
# next emulate no network
_snake_case : List[Any] = [sys.executable, """-c""", """\n""".join([load, mock, run])]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case : int = """1"""
_snake_case : Any = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn("""success""" , result.stdout.decode())
@require_torch
def UpperCamelCase_ ( self : Any) -> Any:
"""simple docstring"""
_snake_case : Dict = """
from transformers import pipeline
"""
_snake_case : Any = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
_snake_case : List[str] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
_snake_case : Tuple = self.get_env()
_snake_case : Union[str, Any] = """1"""
_snake_case : int = [sys.executable, """-c""", """\n""".join([load, mock, run])]
_snake_case : Any = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 1 , result.stderr)
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""") , )
@require_torch
def UpperCamelCase_ ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_snake_case : Optional[Any] = """
from transformers import AutoModel
"""
_snake_case : Union[str, Any] = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
_snake_case : Any = [sys.executable, """-c""", """\n""".join([load, run])]
# should succeed
_snake_case : Union[str, Any] = self.get_env()
_snake_case : Tuple = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn("""success""" , result.stdout.decode())
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case : Union[str, Any] = """1"""
_snake_case : List[Any] = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn("""success""" , result.stdout.decode())
| 317 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
a_ = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 175 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
a__ = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
inspect_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : Union[str, Any] = path + """.py"""
assert script_name in os.listdir(SCREAMING_SNAKE_CASE__ )
assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE__ )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
inspect_metric(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = path + """.py"""
assert script_name in os.listdir(SCREAMING_SNAKE_CASE__ )
assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]:
_snake_case : Dict = get_dataset_config_info(SCREAMING_SNAKE_CASE__ , config_name=SCREAMING_SNAKE_CASE__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
get_dataset_config_info(SCREAMING_SNAKE_CASE__ , config_name=SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
_snake_case : Optional[Any] = get_dataset_config_names(SCREAMING_SNAKE_CASE__ )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
_snake_case : Union[str, Any] = get_dataset_infos(SCREAMING_SNAKE_CASE__ )
assert list(infos.keys() ) == expected_configs
_snake_case : Optional[int] = expected_configs[0]
assert expected_config in infos
_snake_case : int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
_snake_case : Dict = get_dataset_infos(SCREAMING_SNAKE_CASE__ )
assert expected_config in infos
_snake_case : Optional[int] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
get_dataset_split_names(SCREAMING_SNAKE_CASE__ , config_name=SCREAMING_SNAKE_CASE__ )
| 317 | 0 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class A__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase__ : str = "▁" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[str, AddedToken] = "<unk>" , lowerCAmelCase__ : Union[str, AddedToken] = "</s>" , lowerCAmelCase__ : Union[str, AddedToken] = "<pad>" , ) -> int:
"""simple docstring"""
_UpperCAmelCase : int = {
"""pad""": {"""id""": 0, """token""": pad_token},
"""eos""": {"""id""": 1, """token""": eos_token},
"""unk""": {"""id""": 2, """token""": unk_token},
}
_UpperCAmelCase : Dict = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
_UpperCAmelCase : List[Any] = token_dict["""token"""]
_UpperCAmelCase : int = Tokenizer(Unigram() )
_UpperCAmelCase : Union[str, Any] = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ) , " " ),
normalizers.Lowercase(),
] )
_UpperCAmelCase : Tuple = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ ),
pre_tokenizers.Digits(individual_digits=lowerCAmelCase__ ),
pre_tokenizers.Punctuation(),
] )
_UpperCAmelCase : Optional[int] = decoders.Metaspace(replacement=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
_UpperCAmelCase : Any = TemplateProcessing(
single=F"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
_UpperCAmelCase : str = {
"""model""": """SentencePieceUnigram""",
"""replacement""": replacement,
"""add_prefix_space""": add_prefix_space,
}
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 8_0_0_0 , lowerCAmelCase__ : bool = True , ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = trainers.UnigramTrainer(
vocab_size=lowerCAmelCase__ , special_tokens=self.special_tokens_list , show_progress=lowerCAmelCase__ , )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : List[str] = [files]
self._tokenizer.train(lowerCAmelCase__ , trainer=lowerCAmelCase__ )
self.add_unk_id()
def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : Union[Iterator[str], Iterator[Iterator[str]]] , lowerCAmelCase__ : int = 8_0_0_0 , lowerCAmelCase__ : bool = True , ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : int = trainers.UnigramTrainer(
vocab_size=lowerCAmelCase__ , special_tokens=self.special_tokens_list , show_progress=lowerCAmelCase__ , )
self._tokenizer.train_from_iterator(lowerCAmelCase__ , trainer=lowerCAmelCase__ )
self.add_unk_id()
def _lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Tuple = json.loads(self._tokenizer.to_str() )
_UpperCAmelCase : Tuple = self.special_tokens["""unk"""]["""id"""]
_UpperCAmelCase : Any = Tokenizer.from_str(json.dumps(lowerCAmelCase__ ) )
| 145 |
import pprint
import requests
a__ = """https://zenquotes.io/api"""
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + """/today""").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + """/random""").json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 317 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, groups=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.'
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class _snake_case ( nn.Module ):
def __init__( self , a , a , a = 2) -> Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE = nn.Convad(a , a , kernel_size=1 , stride=a , bias=a)
SCREAMING_SNAKE_CASE = nn.BatchNormad(a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Tensor:
SCREAMING_SNAKE_CASE = self.convolution(a)
SCREAMING_SNAKE_CASE = self.normalization(a)
return hidden_state
class _snake_case ( nn.Module ):
def __init__( self , a , a) -> Any:
super().__init__()
SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1))
SCREAMING_SNAKE_CASE = nn.Sequential(
nn.Convad(a , a , kernel_size=1) , nn.ReLU() , nn.Convad(a , a , kernel_size=1) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.pooler(a)
SCREAMING_SNAKE_CASE = self.attention(a)
SCREAMING_SNAKE_CASE = hidden_state * attention
return hidden_state
class _snake_case ( nn.Module ):
def __init__( self , a , a , a , a = 1) -> Union[str, Any]:
super().__init__()
SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width)
SCREAMING_SNAKE_CASE = (
RegNetShortCut(a , a , stride=a) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE = nn.Sequential(
RegNetConvLayer(a , a , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(a , a , stride=a , groups=a , activation=config.hidden_act) , RegNetConvLayer(a , a , kernel_size=1 , activation=a) , )
SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self , a) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = hidden_state
SCREAMING_SNAKE_CASE = self.layer(a)
SCREAMING_SNAKE_CASE = self.shortcut(a)
hidden_state += residual
SCREAMING_SNAKE_CASE = self.activation(a)
return hidden_state
class _snake_case ( nn.Module ):
def __init__( self , a , a , a , a = 1) -> Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width)
SCREAMING_SNAKE_CASE = (
RegNetShortCut(a , a , stride=a) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE = nn.Sequential(
RegNetConvLayer(a , a , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(a , a , stride=a , groups=a , activation=config.hidden_act) , RegNetSELayer(a , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(a , a , kernel_size=1 , activation=a) , )
SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self , a) -> Tuple:
SCREAMING_SNAKE_CASE = hidden_state
SCREAMING_SNAKE_CASE = self.layer(a)
SCREAMING_SNAKE_CASE = self.shortcut(a)
hidden_state += residual
SCREAMING_SNAKE_CASE = self.activation(a)
return hidden_state
class _snake_case ( nn.Module ):
def __init__( self , a , a , a , a = 2 , a = 2 , ) -> int:
super().__init__()
SCREAMING_SNAKE_CASE = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
SCREAMING_SNAKE_CASE = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
a , a , a , stride=a , ) , *[layer(a , a , a) for _ in range(depth - 1)] , )
def SCREAMING_SNAKE_CASE__ ( self , a) -> str:
SCREAMING_SNAKE_CASE = self.layers(a)
return hidden_state
class _snake_case ( nn.Module ):
def __init__( self , a) -> List[str]:
super().__init__()
SCREAMING_SNAKE_CASE = nn.ModuleList([])
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
a , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
SCREAMING_SNAKE_CASE = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(a , config.depths[1:]):
self.stages.append(RegNetStage(a , a , a , depth=a))
def SCREAMING_SNAKE_CASE__ ( self , a , a = False , a = True) -> BaseModelOutputWithNoAttention:
SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE = stage_module(a)
if output_hidden_states:
SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=a , hidden_states=a)
class _snake_case ( SCREAMING_SNAKE_CASE_ ):
_lowercase : Optional[Any] = RegNetConfig
_lowercase : List[Any] = """regnet"""
_lowercase : Any = """pixel_values"""
_lowercase : Optional[Any] = True
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]:
if isinstance(a , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu')
elif isinstance(a , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def SCREAMING_SNAKE_CASE__ ( self , a , a=False) -> Optional[int]:
if isinstance(a , a):
SCREAMING_SNAKE_CASE = value
a_ : Optional[Any] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
a_ : List[str] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , SCREAMING_SNAKE_CASE_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class _snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a) -> Dict:
super().__init__(a)
SCREAMING_SNAKE_CASE = config
SCREAMING_SNAKE_CASE = RegNetEmbeddings(a)
SCREAMING_SNAKE_CASE = RegNetEncoder(a)
SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = None) -> BaseModelOutputWithPoolingAndNoAttention:
SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.embedder(a)
SCREAMING_SNAKE_CASE = self.encoder(
a , output_hidden_states=a , return_dict=a)
SCREAMING_SNAKE_CASE = encoder_outputs[0]
SCREAMING_SNAKE_CASE = self.pooler(a)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a , pooler_output=a , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , SCREAMING_SNAKE_CASE_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class _snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a) -> Tuple:
super().__init__(a)
SCREAMING_SNAKE_CASE = config.num_labels
SCREAMING_SNAKE_CASE = RegNetModel(a)
# classification head
SCREAMING_SNAKE_CASE = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE__ ( self , a = None , a = None , a = None , a = None , ) -> ImageClassifierOutputWithNoAttention:
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.regnet(a , output_hidden_states=a , return_dict=a)
SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE = self.classifier(a)
SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze())
else:
SCREAMING_SNAKE_CASE = loss_fct(a , a)
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE = CrossEntropyLoss()
SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE = loss_fct(a , a)
if not return_dict:
SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=a , logits=a , hidden_states=outputs.hidden_states)
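# Usage sketch: run a randomly initialized RegNet on dummy input (shapes illustrative;
# a real checkpoint such as "facebook/regnet-y-040" would be loaded via from_pretrained,
# which needs network access):
# config = RegNetConfig(num_labels=10)
# model = RegNetForImageClassification(config).eval()
# with torch.no_grad():
#     logits = model(torch.randn(1, 3, 224, 224)).logits  # -> shape (1, 10)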
| 137 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
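# Worked example for the derived `hidden_size` above: with the defaults embed_dim=96 and
# depths=[2, 2, 6, 2], hidden_size = 96 * 2 ** (4 - 1) = 768.
# config = SwinConfig()
# assert config.hidden_size == 768 and config.num_layers == 4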
| 317 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
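# Usage sketch: aligning the template with a dataset's features (feature names illustrative):
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# task = ImageClassification().align_with_features(features)
# assert task.label_schema["labels"].names == ["cat", "dog"]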
| 165 |
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Tuple , *lowerCAmelCase : str , **lowerCAmelCase : Optional[Any]) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : List[Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[str]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : List[str] , **lowerCAmelCase : Any) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : str , **lowerCAmelCase : Any) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : List[Any] , **lowerCAmelCase : str) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : Dict , **lowerCAmelCase : int) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Any , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Dict , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : int , **lowerCAmelCase : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : List[str]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : Optional[int] , *lowerCAmelCase : Any , **lowerCAmelCase : Union[str, Any]) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Optional[Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : List[Any]) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : int , **lowerCAmelCase : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Any) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
def lowercase ( *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
def lowercase ( *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
def lowercase ( *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
def lowercase ( *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
def lowercase ( *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> int:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
def lowercase ( *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
def lowercase ( *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Any , **lowerCAmelCase : Any) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Tuple) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Dict) -> Dict:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : str , **lowerCAmelCase : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Dict , **lowerCAmelCase : int) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : Optional[int] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Tuple) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[str] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : Dict) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : int) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Tuple = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : int , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Dict , **lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Any) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Dict) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : List[str] , **lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> Dict:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Any , **lowerCAmelCase : int) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[str] = ["""torch"""]
def __init__( self : Optional[int] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> List[str]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : List[str]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : str , **lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : int , **lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Any = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : str) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : Tuple) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Tuple = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : Dict , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : int) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Optional[int]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Any , **lowerCAmelCase : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Dict , **lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : List[str] , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Dict = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[str]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Tuple) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Any) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : List[str] , **lowerCAmelCase : int) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Any , **lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Any = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : int , **lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Any = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Any , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : str) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Any , **lowerCAmelCase : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Union[str, Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple) -> str:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Union[str, Any]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : Dict , **lowerCAmelCase : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Any , **lowerCAmelCase : List[Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : List[str] , **lowerCAmelCase : List[Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Dict) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : List[str] , **lowerCAmelCase : int) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Tuple) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Optional[Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[str] = ["""torch"""]
def __init__( self : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[str]) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : Optional[Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : int) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : Any , **lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Tuple) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : str , **lowerCAmelCase : Dict) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[str]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Dict = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : List[str] , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Dict) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Any = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : Tuple) -> Dict:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : List[Any] , **lowerCAmelCase : List[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Any , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Dict , **lowerCAmelCase : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : str , **lowerCAmelCase : Any) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[str]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Optional[int] , *lowerCAmelCase : Dict , **lowerCAmelCase : Dict) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : int , **lowerCAmelCase : List[str]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Any , **lowerCAmelCase : Dict) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Dict = ["""torch"""]
def __init__( self : Optional[int] , *lowerCAmelCase : int , **lowerCAmelCase : List[str]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Dict , **lowerCAmelCase : Tuple) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : List[str] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : str , **lowerCAmelCase : List[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Tuple) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
| 317 | 0 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
__lowercase = (720, 1280) # Height, Width
__lowercase = (0.4, 0.6) # if height or width lower than this scale, drop it.
__lowercase = 1 / 100
__lowercase = """"""
__lowercase = """"""
__lowercase = """"""
__lowercase = 250
def lowercase ( )-> None:
'''simple docstring'''
a : Union[str, Any] = get_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for index in range(SCREAMING_SNAKE_CASE__ ):
a : List[Any] = random.sample(range(len(SCREAMING_SNAKE_CASE__ ) ) , 4 )
a : str = update_image_and_anno(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , filter_scale=SCREAMING_SNAKE_CASE__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
a : Optional[int] = random_chars(32 )
a : Union[str, Any] = path.split(os.sep )[-1].rsplit("." , 1 )[0]
a : List[Any] = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , SCREAMING_SNAKE_CASE__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
a : List[str] = []
for anno in new_annos:
a : str = anno[3] - anno[1]
a : str = anno[4] - anno[2]
a : Any = anno[1] + width / 2
a : List[str] = anno[2] + height / 2
a : Tuple = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(SCREAMING_SNAKE_CASE__ )
with open(F'''{file_root}.txt''' , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def lowercase ( A_ , A_ )-> tuple[list, list]:
'''simple docstring'''
a : List[str] = []
a : List[str] = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE__ , "*.txt" ) ):
a : Optional[int] = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(SCREAMING_SNAKE_CASE__ ) as in_file:
a : Optional[Any] = in_file.readlines()
a : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE__ , F'''{label_name}.jpg''' )
a : List[str] = []
for obj_list in obj_lists:
a : Optional[int] = obj_list.rstrip("\n" ).split(" " )
a : Dict = float(obj[1] ) - float(obj[3] ) / 2
a : List[str] = float(obj[2] ) - float(obj[4] ) / 2
a : Any = float(obj[1] ) + float(obj[3] ) / 2
a : int = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE__ )
labels.append(SCREAMING_SNAKE_CASE__ )
return img_paths, labels
def lowercase ( A_ , A_ , A_ , A_ , A_ , A_ = 0.0 , )-> tuple[list, list, str]:
'''simple docstring'''
a : List[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
a : Tuple = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a : Union[str, Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a : Any = int(scale_x * output_size[1] )
a : List[Any] = int(scale_y * output_size[0] )
a : Tuple = []
a : Tuple = []
for i, index in enumerate(SCREAMING_SNAKE_CASE__ ):
a : int = all_img_list[index]
path_list.append(SCREAMING_SNAKE_CASE__ )
a : Dict = all_annos[index]
a : Union[str, Any] = cva.imread(SCREAMING_SNAKE_CASE__ )
if i == 0: # top-left
a : Any = cva.resize(SCREAMING_SNAKE_CASE__ , (divid_point_x, divid_point_y) )
a : Optional[Any] = img
for bbox in img_annos:
a : List[str] = bbox[1] * scale_x
a : List[str] = bbox[2] * scale_y
a : List[str] = bbox[3] * scale_x
a : Tuple = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
a : int = cva.resize(SCREAMING_SNAKE_CASE__ , (output_size[1] - divid_point_x, divid_point_y) )
a : str = img
for bbox in img_annos:
a : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
a : List[str] = bbox[2] * scale_y
a : List[str] = scale_x + bbox[3] * (1 - scale_x)
a : Optional[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
a : Optional[int] = cva.resize(SCREAMING_SNAKE_CASE__ , (divid_point_x, output_size[0] - divid_point_y) )
a : str = img
for bbox in img_annos:
a : Union[str, Any] = bbox[1] * scale_x
a : List[Any] = scale_y + bbox[2] * (1 - scale_y)
a : List[Any] = bbox[3] * scale_x
a : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
a : str = cva.resize(
SCREAMING_SNAKE_CASE__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
a : Any = img
for bbox in img_annos:
a : List[str] = scale_x + bbox[1] * (1 - scale_x)
a : List[str] = scale_y + bbox[2] * (1 - scale_y)
a : List[Any] = scale_x + bbox[3] * (1 - scale_x)
a : Optional[int] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
a : int = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowercase ( A_ )-> str:
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
a : int = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 40 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
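# Sanity-check sketch for the derived attribute above:
# config = EfficientNetConfig()
# assert config.num_hidden_layers == sum([1, 2, 2, 3, 3, 4, 1]) * 4  # == 64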
| 317 | 0 |
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir( tmp_path , dataset_loading_script_name , dataset_loading_script_code ) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / f"""{script_name}.py"""
    with open(script_path , 'w' ) as f:
        f.write(dataset_loading_script_code )
    return str(script_path )
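# Hedged example test (added for illustration, not part of the original file):
# the fixture above writes `<tmp_path>/datasets/<name>/<name>.py` and returns
# that path as a string.
def test_dataset_loading_script_dir_layout(dataset_loading_script_dir, dataset_loading_script_name):
    assert dataset_loading_script_dir.endswith(f"{dataset_loading_script_name}.py")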
| 119 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class snake_case ( TaskTemplate ):
    '''simple docstring'''
    task : str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema : ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} )
    label_schema : ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string" ),
                    "answer_start": Value("int32" ),
                } )
        } )
    question_column : str = "question"
    context_column : str = "context"
    answers_column : str = "answers"

    @property
    def column_mapping( self ) -> Dict[str, str]:
        """simple docstring"""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
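# Hedged usage sketch (added for illustration, not part of the original file):
# overriding the column fields retargets user-named columns onto the schema.
def _column_mapping_example() -> None:
    template = snake_case(question_column="q", context_column="ctx", answers_column="ans")
    assert template.column_mapping == {"q": "question", "ctx": "context", "ans": "answers"}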
| 317 | 0 |
speed_chart = {
"""km/h""": 1.0,
"""m/s""": 3.6,
"""mph""": 1.609_344,
"""knot""": 1.852,
}
speed_chart_inverse = {
"""km/h""": 1.0,
"""m/s""": 0.277_777_778,
"""mph""": 0.621_371_192,
"""knot""": 0.539_956_803,
}
def __lowerCamelCase ( speed , unit_from , unit_to ):
    '''simple docstring'''
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse )}"
        )
        raise ValueError(msg )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
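# Hedged worked example (added for illustration, not part of the original
# file): 100 km/h -> m/s multiplies by speed_chart["km/h"] (1.0) and
# speed_chart_inverse["m/s"] (0.277777778), giving 27.778 after the
# 3-decimal rounding applied above.
def _speed_conversion_examples() -> None:
    assert __lowerCamelCase(100, "km/h", "m/s") == 27.778
    assert __lowerCamelCase(100, "km/h", "mph") == 62.137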
if __name__ == "__main__":
import doctest
doctest.testmod()
| 285 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
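# Hedged design note (added for illustration, not part of the original file):
# the `_import_structure` dict plus the TYPE_CHECKING branch is the standard
# transformers lazy-import pattern; heavy torch/tf/flax submodules are only
# imported when one of their symbols is first touched, e.g.:
#
#   from transformers import Wav2Vec2Config   # cheap: config submodule only
#   from transformers import Wav2Vec2Model    # triggers the torch submodule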
| 317 | 0 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size ( features : Features ) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature : FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )

    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
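# Hedged example (added for illustration, not part of the original module):
# an Image column caps the writer batch size at the image row-group constant.
def _writer_batch_size_example() -> None:
    features = Features({"image": Image(), "label": Value("int64")})
    assert get_writer_batch_size(features) == config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS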
class ParquetDatasetReader (AbstractDatasetReader ):
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["""parquet"""][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )

    def read( self):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory)
        return dataset
class ParquetDatasetWriter :
    def __init__( self , dataset , path_or_buf , batch_size = None , **parquet_writer_kwargs , ):
        '''simple docstring'''
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write( self) -> int:
        '''simple docstring'''
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
            with open(self.path_or_buf , 'wb+') as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs)
        return written

    def _write( self , file_obj , batch_size , **parquet_writer_kwargs) -> int:
        '''simple docstring'''
        written = 0
        _ = parquet_writer_kwargs.pop('path_or_buf' , None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0 , len(self.dataset) , batch_size) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
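# Hedged usage sketch (added for illustration, not part of the original
# module): write a tiny in-memory Dataset to parquet and return bytes written.
def _parquet_writer_example(path: str) -> int:
    ds = Dataset.from_dict({"text": ["a", "b"]})
    return ParquetDatasetWriter(ds, path).write()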
| 190 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader ( AbstractDatasetReader ):
    '''simple docstring'''
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , field = None , num_proc = None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )

    def read( self):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter :
    '''simple docstring'''
    def __init__( self , dataset , path_or_buf , batch_size = None , num_proc = None , **to_json_kwargs , ) -> None:
        """simple docstring"""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''')
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = """utf-8"""
        self.to_json_kwargs = to_json_kwargs
    def write( self) -> int:
        """simple docstring"""
        _ = self.to_json_kwargs.pop("""path_or_buf""" , None)
        orient = self.to_json_kwargs.pop("""orient""" , """records""")
        lines = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False)
        index = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True)
        compression = self.to_json_kwargs.pop("""compression""" , None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''')
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf , """wb""" , compression=compression) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    """ was passed. Please provide a local path instead.""")
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs)
        return written
    def _batch_json( self , args) -> bytes:
        """simple docstring"""
        offset , orient , lines , index , to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs)
        if not json_str.endswith("""\n"""):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write( self , file_obj , orient , lines , index , **to_json_kwargs , ) -> int:
        """simple docstring"""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset) , self.batch_size) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows , batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                    written += file_obj.write(json_str)
        return written
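# Hedged usage sketch (added for illustration, not part of the original
# module): serialize a tiny in-memory Dataset to JSON lines on disk.
def _json_writer_example(path: str) -> int:
    ds = Dataset.from_dict({"text": ["a", "b"]})
    return JsonDatasetWriter(ds, path).write()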
| 317 | 0 |
import requests
def send_slack_message ( message_body , slack_url ):
    """simple docstring"""
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url , json={'text': message_body} , headers=headers )
    if response.status_code != 2_0_0:
        msg = (
            """Request to slack returned an error """
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg )
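# Hedged variant (added for illustration, not part of the original script):
# the same request with an explicit timeout, so a hung Slack endpoint cannot
# block the caller indefinitely; the name below is ours.
def send_slack_message_with_timeout(message_body, slack_url, timeout=10):
    response = requests.post(
        slack_url, json={"text": message_body}, headers={"Content-Type": "application/json"}, timeout=timeout
    )
    response.raise_for_status()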
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 253 |
import torch
from torch import nn
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : int=1 , lowerCAmelCase : List[Any]=False) -> str:
"""simple docstring"""
super().__init__()
_snake_case : List[str] = n_token
_snake_case : Any = d_embed
_snake_case : List[str] = d_proj
_snake_case : Optional[int] = cutoffs + [n_token]
_snake_case : Dict = [0] + self.cutoffs
_snake_case : Optional[Any] = div_val
_snake_case : Tuple = self.cutoffs[0]
_snake_case : List[str] = len(self.cutoffs) - 1
_snake_case : str = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_snake_case : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
_snake_case : Any = nn.Parameter(torch.zeros(self.n_clusters))
_snake_case : Tuple = nn.ModuleList()
_snake_case : int = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase , lowerCAmelCase)))
else:
self.out_projs.append(lowerCAmelCase)
self.out_layers.append(nn.Linear(lowerCAmelCase , lowerCAmelCase))
else:
for i in range(len(self.cutoffs)):
_snake_case , _snake_case : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Dict = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase , lowerCAmelCase)))
self.out_layers.append(nn.Linear(lowerCAmelCase , r_idx - l_idx))
_snake_case : Tuple = keep_order
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int]) -> List[str]:
"""simple docstring"""
if proj is None:
_snake_case : List[Any] = nn.functional.linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_snake_case : List[str] = nn.functional.linear(lowerCAmelCase , proj.t().contiguous())
_snake_case : Optional[int] = nn.functional.linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : int=False) -> Tuple:
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
_snake_case : List[str] = hidden[..., :-1, :].contiguous()
_snake_case : int = labels[..., 1:].contiguous()
_snake_case : int = hidden.view(-1 , hidden.size(-1))
_snake_case : str = labels.view(-1)
if hidden.size(0) != labels.size(0):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""")
else:
_snake_case : List[Any] = hidden.view(-1 , hidden.size(-1))
if self.n_clusters == 0:
_snake_case : int = self._compute_logit(lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
if labels is not None:
_snake_case : Optional[int] = labels != -100
_snake_case : Union[str, Any] = torch.zeros_like(lowerCAmelCase , dtype=hidden.dtype , device=hidden.device)
_snake_case : Union[str, Any] = (
-nn.functional.log_softmax(lowerCAmelCase , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
)
else:
_snake_case : Optional[int] = nn.functional.log_softmax(lowerCAmelCase , dim=-1)
else:
# construct weights and biases
_snake_case , _snake_case : Optional[int] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
_snake_case , _snake_case : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Dict = self.out_layers[0].weight[l_idx:r_idx]
_snake_case : Tuple = self.out_layers[0].bias[l_idx:r_idx]
else:
_snake_case : Any = self.out_layers[i].weight
_snake_case : Optional[int] = self.out_layers[i].bias
if i == 0:
_snake_case : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0)
_snake_case : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(lowerCAmelCase)
biases.append(lowerCAmelCase)
_snake_case , _snake_case , _snake_case : List[Any] = weights[0], biases[0], self.out_projs[0]
_snake_case : List[str] = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : Dict = nn.functional.log_softmax(lowerCAmelCase , dim=1)
if labels is None:
_snake_case : List[Any] = hidden.new_empty((head_logit.size(0), self.n_token))
else:
_snake_case : Optional[Any] = torch.zeros_like(lowerCAmelCase , dtype=hidden.dtype , device=hidden.device)
_snake_case : Optional[int] = 0
_snake_case : Union[str, Any] = [0] + self.cutoffs
for i in range(len(lowerCAmelCase) - 1):
_snake_case , _snake_case : Any = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_snake_case : Optional[int] = (labels >= l_idx) & (labels < r_idx)
_snake_case : Dict = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_snake_case : Dict = labels.index_select(0 , lowerCAmelCase) - l_idx
_snake_case : List[Any] = head_logprob.index_select(0 , lowerCAmelCase)
_snake_case : Dict = hidden.index_select(0 , lowerCAmelCase)
else:
_snake_case : Optional[Any] = hidden
if i == 0:
if labels is not None:
_snake_case : str = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
else:
_snake_case : int = head_logprob[:, : self.cutoffs[0]]
else:
_snake_case , _snake_case , _snake_case : Dict = weights[i], biases[i], self.out_projs[i]
_snake_case : int = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : List[str] = nn.functional.log_softmax(lowerCAmelCase , dim=1)
_snake_case : str = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_snake_case : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None]).squeeze(1)
else:
_snake_case : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_snake_case : int = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""") and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase , -logprob_i)
else:
out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Optional[int]) -> Tuple:
"""simple docstring"""
if self.n_clusters == 0:
_snake_case : Optional[Any] = self._compute_logit(lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
return nn.functional.log_softmax(lowerCAmelCase , dim=-1)
else:
# construct weights and biases
_snake_case , _snake_case : Optional[int] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
_snake_case , _snake_case : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Optional[Any] = self.out_layers[0].weight[l_idx:r_idx]
_snake_case : Union[str, Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
_snake_case : Tuple = self.out_layers[i].weight
_snake_case : Any = self.out_layers[i].bias
if i == 0:
_snake_case : Tuple = torch.cat([weight_i, self.cluster_weight] , dim=0)
_snake_case : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(lowerCAmelCase)
biases.append(lowerCAmelCase)
_snake_case , _snake_case , _snake_case : int = weights[0], biases[0], self.out_projs[0]
_snake_case : Union[str, Any] = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : Any = hidden.new_empty((head_logit.size(0), self.n_token))
_snake_case : Optional[Any] = nn.functional.log_softmax(lowerCAmelCase , dim=1)
_snake_case : List[Any] = [0] + self.cutoffs
for i in range(len(lowerCAmelCase) - 1):
_snake_case , _snake_case : Any = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_snake_case : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
_snake_case , _snake_case , _snake_case : str = weights[i], biases[i], self.out_projs[i]
_snake_case : List[str] = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : str = nn.functional.log_softmax(lowerCAmelCase , dim=1)
_snake_case : Dict = head_logprob[:, -i] + tail_logprob_i
_snake_case : Any = logprob_i
return out
| 317 | 0 |
def jaccard_similarity ( set_a , set_b , alternative_union=False):
    '''simple docstring'''
    if isinstance(set_a , set) and isinstance(set_b , set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a , (list, tuple)) and isinstance(set_b , (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
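# Hedged worked example (added for illustration, not part of the original
# file): for {a..e} and {c,d,e,f,h,i} the intersection has 3 elements and the
# union 8, so the Jaccard index is 3/8; alternative_union divides by 5+6=11.
def _jaccard_examples() -> None:
    assert jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 0.375
    assert abs(jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}, True) - 3 / 11) < 1e-9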
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
    print(jaccard_similarity(set_a, set_b))
| 232 |
from ...processing_utils import ProcessorMixin
class snake_case ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["""image_processor""", """feature_extractor"""]
    image_processor_class = """TvltImageProcessor"""
    feature_extractor_class = """TvltFeatureExtractor"""

    def __init__( self , image_processor , feature_extractor) -> None:
        """simple docstring"""
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
        """simple docstring"""
        if images is None and audio is None:
            raise ValueError("""You need to specify either an `images` or `audio` input to process.""")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
@property
    def model_input_names( self):
        """simple docstring"""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 317 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 237 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple=7 , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Optional[Any]=18 , lowerCAmelCase : Dict=30 , lowerCAmelCase : Optional[int]=400 , lowerCAmelCase : List[str]=True , lowerCAmelCase : int=None , lowerCAmelCase : Tuple=True , lowerCAmelCase : Dict=None , ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Optional[Any] = size if size is not None else {"""shortest_edge""": 20}
_snake_case : Any = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_snake_case : Optional[Any] = parent
_snake_case : Tuple = batch_size
_snake_case : int = num_channels
_snake_case : List[Any] = image_size
_snake_case : Dict = min_resolution
_snake_case : List[Any] = max_resolution
_snake_case : List[Any] = do_resize
_snake_case : Any = size
_snake_case : str = do_center_crop
_snake_case : Union[str, Any] = crop_size
def UpperCamelCase_ ( self : int) -> str:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case ( SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
snake_case_ : Tuple = MobileNetVaImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : Any) -> Optional[Any]:
"""simple docstring"""
_snake_case : str = MobileNetVaImageProcessingTester(self)
@property
def UpperCamelCase_ ( self : int) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : List[Any]) -> str:
"""simple docstring"""
_snake_case : int = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase , """do_resize"""))
self.assertTrue(hasattr(lowerCAmelCase , """size"""))
self.assertTrue(hasattr(lowerCAmelCase , """do_center_crop"""))
self.assertTrue(hasattr(lowerCAmelCase , """crop_size"""))
def UpperCamelCase_ ( self : List[str]) -> List[Any]:
"""simple docstring"""
_snake_case : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""shortest_edge""": 20})
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18})
_snake_case : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {"""shortest_edge""": 42})
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84})
def UpperCamelCase_ ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase_ ( self : Dict) -> str:
"""simple docstring"""
_snake_case : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image)
# Test not batched input
_snake_case : int = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_snake_case : Dict = image_processing(lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase_ ( self : int) -> List[Any]:
"""simple docstring"""
_snake_case : int = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray)
# Test not batched input
_snake_case : int = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_snake_case : str = image_processing(lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase_ ( self : str) -> List[str]:
"""simple docstring"""
_snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor)
# Test not batched input
_snake_case : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_snake_case : int = image_processing(lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 317 | 0 |
def solution ( n : int = 600851475143 ) -> int:
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.' )
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.' )
    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime )
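# Hedged worked example (added for illustration, not part of the original
# file): 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
def _solution_examples() -> None:
    assert solution(13_195) == 29
    assert solution(17) == 17  # a prime is its own largest prime factor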
if __name__ == "__main__":
print(F"""{solution() = }""")
| 175 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """xlm-roberta"""
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ])
| 317 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__a = False
class A__ ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" , ).images
        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 145 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["""names""", """prefix"""]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["""warn_bad_lines""", """error_bad_lines""", """mangle_dupe_cols"""]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["""encoding_errors""", """on_bad_lines"""]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["""date_format"""]
@dataclass
class CsvConfig ( datasets.BuilderConfig ):
    '''simple docstring'''
    sep : str = ","
    delimiter : Optional[str] = None
    header : Optional[Union[int, List[int], str]] = "infer"
    names : Optional[List[str]] = None
    column_names : Optional[List[str]] = None
    index_col : Optional[Union[int, str, List[int], List[str]]] = None
    usecols : Optional[Union[List[int], List[str]]] = None
    prefix : Optional[str] = None
    mangle_dupe_cols : bool = True
    engine : Optional[Literal["c", "python", "pyarrow"]] = None
    converters : Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values : Optional[list] = None
    false_values : Optional[list] = None
    skipinitialspace : bool = False
    skiprows : Optional[Union[int, List[int]]] = None
    nrows : Optional[int] = None
    na_values : Optional[Union[str, List[str]]] = None
    keep_default_na : bool = True
    na_filter : bool = True
    verbose : bool = False
    skip_blank_lines : bool = True
    thousands : Optional[str] = None
    decimal : str = "."
    lineterminator : Optional[str] = None
    quotechar : str = '"'
    quoting : int = 0
    escapechar : Optional[str] = None
    comment : Optional[str] = None
    encoding : Optional[str] = None
    dialect : Optional[str] = None
    error_bad_lines : bool = True
    warn_bad_lines : bool = True
    skipfooter : int = 0
    doublequote : bool = True
    memory_map : bool = False
    float_precision : Optional[str] = None
    chunksize : int = 1_00_00
    features : Optional[datasets.Features] = None
    encoding_errors : Optional[str] = "strict"
    on_bad_lines : Literal["error", "warn", "skip"] = "error"
    date_format : Optional[str] = None
    def __post_init__( self) -> None:
        """simple docstring"""
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs( self) -> dict:
        """simple docstring"""
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
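# Hedged example (added for illustration, not part of the original module):
# parameters pandas has no default for ("names", "prefix") are dropped from
# the forwarded kwargs when they are left at their CsvConfig defaults.
def _pd_read_csv_kwargs_example() -> None:
    kwargs = CsvConfig().pd_read_csv_kwargs
    assert "names" not in kwargs and "prefix" not in kwargs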
class Csv ( datasets.ArrowBasedBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info( self) -> datasets.DatasetInfo:
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators( self , dl_manager):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files , (str, list, tuple)):
            files = data_files
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files}))
        return splits
    def _cast_table( self , pa_table: pa.Table) -> pa.Table:
        """simple docstring"""
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema)
        return pa_table
    def _generate_tables( self , files):
        """simple docstring"""
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(F'''Failed to read file \'{file}\' with error {type(e)}: {e}''')
                raise
| 317 | 0 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def lowerCamelCase__ (_UpperCAmelCase):
if hor == 128:
SCREAMING_SNAKE_CASE = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
SCREAMING_SNAKE_CASE = (32, 128, 256)
SCREAMING_SNAKE_CASE = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
SCREAMING_SNAKE_CASE = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
SCREAMING_SNAKE_CASE = (32, 64, 128, 256)
SCREAMING_SNAKE_CASE = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
SCREAMING_SNAKE_CASE = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''')
SCREAMING_SNAKE_CASE = model.state_dict()
SCREAMING_SNAKE_CASE = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
SCREAMING_SNAKE_CASE = UNetaDModel(**SCREAMING_SNAKE_CASE__)
print(F'''length of state dict: {len(state_dict.keys())}''')
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys())}''')
SCREAMING_SNAKE_CASE = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys()))
for k, v in mapping.items():
SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE__)
hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE__)
torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''')
with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w') as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
SCREAMING_SNAKE_CASE = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
SCREAMING_SNAKE_CASE = model
SCREAMING_SNAKE_CASE = UNetaDModel(**SCREAMING_SNAKE_CASE__)
print(F'''length of state dict: {len(state_dict.keys())}''')
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys())}''')
SCREAMING_SNAKE_CASE = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys()))
for k, v in mapping.items():
SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE__)
hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE__)
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin')
with open('hub/hopper-medium-v2/value_function/config.json' , 'w') as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 137 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict ( TypedDict ):
    '''simple docstring'''
    bwt_string : str
    idx_original_string : int


def all_rotations ( s : str ) -> list[str]:
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    return [s[i:] + s[:i] for i in range(len(s ) )]


def bwt_transform ( s : str ) -> BWTTransformDict:
    if not isinstance(s , str ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )
    rotations = all_rotations(s )
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response : BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response


def reverse_bwt ( bwt_string : str , idx_original_string : int ) -> str:
    if not isinstance(bwt_string , str ):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or passive"""
            """ of cast to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
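# Hedged round-trip example (added for illustration, not part of the original
# file): "^BANANA" transforms to "BNN^AAA" with original index 6, and the
# inverse transform recovers the input exactly.
def _bwt_roundtrip_example() -> None:
    result = bwt_transform("^BANANA")
    assert result == {"bwt_string": "BNN^AAA", "idx_original_string": 6}
    assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"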
if __name__ == "__main__":
a__ = """Provide a string that I will generate its BWT transform: """
a__ = input(entry_msg).strip()
a__ = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
a__ = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
)
| 317 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
A_ : Optional[Any] = logging.get_logger(__name__)
class lowerCamelCase (BeitImageProcessor ):
def __init__( self : Tuple , *__UpperCAmelCase : Dict , **__UpperCAmelCase : Union[str, Any] ) -> None:
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
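# Hedged usage note (added for illustration, not part of the original file):
# new code should instantiate the replacement class directly; the checkpoint
# name below is illustrative.
#
#   from transformers import BeitImageProcessor
#   image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")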
| 165 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
a__ = logging.get_logger(__name__)
# General docstring
a__ = """RegNetConfig"""
# Base docstring
a__ = """facebook/regnet-y-040"""
a__ = [1, 10_88, 7, 7]
# Image classification docstring
a__ = """facebook/regnet-y-040"""
a__ = """tabby, tabby cat"""
a__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 1 , lowerCAmelCase : Optional[str] = "relu" , ) -> List[str]:
"""simple docstring"""
super().__init__()
_snake_case : int = nn.Conv2d(
lowerCAmelCase , lowerCAmelCase , kernel_size=lowerCAmelCase , stride=lowerCAmelCase , padding=kernel_size // 2 , groups=lowerCAmelCase , bias=lowerCAmelCase , )
_snake_case : List[Any] = nn.BatchNorm2d(lowerCAmelCase)
_snake_case : Tuple = ACT2FN[activation] if activation is not None else nn.Identity()
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
_snake_case : Tuple = self.convolution(lowerCAmelCase)
_snake_case : Any = self.normalization(lowerCAmelCase)
_snake_case : List[Any] = self.activation(lowerCAmelCase)
return hidden_state
class RegNetEmbeddings ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : RegNetConfig) -> List[str]:
"""simple docstring"""
super().__init__()
_snake_case : Dict = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act)
_snake_case : Dict = config.num_channels
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
_snake_case : str = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""")
_snake_case : Any = self.embedder(lowerCAmelCase)
return hidden_state
class RegNetShortCut ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 2) -> Optional[Any]:
"""simple docstring"""
super().__init__()
_snake_case : Optional[Any] = nn.Conv2d(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , stride=lowerCAmelCase , bias=lowerCAmelCase)
_snake_case : Tuple = nn.BatchNorm2d(lowerCAmelCase)
def UpperCamelCase_ ( self : int , lowerCAmelCase : Tensor) -> Tensor:
"""simple docstring"""
_snake_case : Optional[Any] = self.convolution(lowerCAmelCase)
_snake_case : Optional[int] = self.normalization(lowerCAmelCase)
return hidden_state
class RegNetSELayer ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : int , lowerCAmelCase : int) -> Any:
"""simple docstring"""
super().__init__()
_snake_case : Optional[Any] = nn.AdaptiveAvgPool2d((1, 1))
_snake_case : Optional[Any] = nn.Sequential(
nn.Conv2d(lowerCAmelCase , lowerCAmelCase , kernel_size=1) , nn.ReLU() , nn.Conv2d(lowerCAmelCase , lowerCAmelCase , kernel_size=1) , nn.Sigmoid() , )
def UpperCamelCase_ ( self : Any , lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
_snake_case : Dict = self.pooler(lowerCAmelCase)
_snake_case : List[str] = self.attention(lowerCAmelCase)
_snake_case : str = hidden_state * attention
return hidden_state
class RegNetXLayer ( nn.Module ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 1) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
_snake_case : Optional[int] = in_channels != out_channels or stride != 1
_snake_case : Optional[Any] = max(1 , out_channels // config.groups_width)
_snake_case : Union[str, Any] = (
RegNetShortCut(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase) if should_apply_shortcut else nn.Identity()
)
_snake_case : Tuple = nn.Sequential(
RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , groups=lowerCAmelCase , activation=config.hidden_act) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=lowerCAmelCase) , )
_snake_case : Dict = ACT2FN[config.hidden_act]
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Union[str, Any] = hidden_state
_snake_case : int = self.layer(lowerCAmelCase)
_snake_case : Dict = self.shortcut(lowerCAmelCase)
hidden_state += residual
_snake_case : str = self.activation(lowerCAmelCase)
return hidden_state
class RegNetYLayer ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 1) -> Optional[Any]:
"""simple docstring"""
super().__init__()
_snake_case : int = in_channels != out_channels or stride != 1
_snake_case : Dict = max(1 , out_channels // config.groups_width)
_snake_case : Tuple = (
RegNetShortCut(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase) if should_apply_shortcut else nn.Identity()
)
_snake_case : Dict = nn.Sequential(
RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , groups=lowerCAmelCase , activation=config.hidden_act) , RegNetSELayer(lowerCAmelCase , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=lowerCAmelCase) , )
_snake_case : Optional[Any] = ACT2FN[config.hidden_act]
def UpperCamelCase_ ( self : Optional[int] , lowerCAmelCase : List[Any]) -> Tuple:
"""simple docstring"""
_snake_case : Tuple = hidden_state
_snake_case : List[Any] = self.layer(lowerCAmelCase)
_snake_case : List[str] = self.shortcut(lowerCAmelCase)
hidden_state += residual
_snake_case : int = self.activation(lowerCAmelCase)
return hidden_state
class RegNetStage ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 2 , ) -> int:
"""simple docstring"""
super().__init__()
_snake_case : Optional[Any] = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
_snake_case : Optional[int] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , ) , *[layer(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase) for _ in range(depth - 1)] , )
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
_snake_case : List[str] = self.layers(lowerCAmelCase)
return hidden_state
class RegNetEncoder ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : RegNetConfig) -> List[str]:
"""simple docstring"""
super().__init__()
_snake_case : Dict = nn.ModuleList([])
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:]):
self.stages.append(RegNetStage(config , in_channels , out_channels , depth=depth))
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : Tensor , lowerCAmelCase : bool = False , lowerCAmelCase : bool = True) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
_snake_case : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case : Optional[int] = hidden_states + (hidden_state,)
_snake_case : Dict = stage_module(lowerCAmelCase)
if output_hidden_states:
_snake_case : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase , hidden_states=lowerCAmelCase)
class RegNetPreTrainedModel ( PreTrainedModel ):
'''simple docstring'''
config_class = RegNetConfig
base_model_prefix = """regnet"""
main_input_name = """pixel_values"""
supports_gradient_checkpointing = True
def _init_weights( self , module) -> None:
"""simple docstring"""
if isinstance(module , nn.Conv2d):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""")
elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def _set_gradient_checkpointing( self , module , value=False) -> None:
"""simple docstring"""
if isinstance(module , RegNetModel):
module.gradient_checkpointing = value
REGNET_START_DOCSTRING = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel ( RegNetPreTrainedModel ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCAmelCase : List[str]) -> Dict:
"""simple docstring"""
super().__init__(lowerCAmelCase)
_snake_case : Any = config
_snake_case : Any = RegNetEmbeddings(lowerCAmelCase)
_snake_case : Dict = RegNetEncoder(lowerCAmelCase)
_snake_case : Tuple = nn.AdaptiveAvgPool2d((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : Tensor , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
_snake_case : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case : str = self.embedder(lowerCAmelCase)
_snake_case : Optional[Any] = self.encoder(
lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase)
_snake_case : Tuple = encoder_outputs[0]
_snake_case : Optional[Any] = self.pooler(lowerCAmelCase)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase , pooler_output=lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification ( RegNetPreTrainedModel ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
super().__init__(lowerCAmelCase)
_snake_case : Union[str, Any] = config.num_labels
_snake_case : List[Any] = RegNetModel(lowerCAmelCase)
# classification head
_snake_case : Union[str, Any] = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase_ ( self : int , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[torch.LongTensor] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
_snake_case : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_snake_case : Tuple = self.regnet(lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase)
_snake_case : str = outputs.pooler_output if return_dict else outputs[1]
_snake_case : Optional[Any] = self.classifier(lowerCAmelCase)
_snake_case : Any = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_snake_case : List[Any] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_snake_case : Optional[int] = """single_label_classification"""
else:
_snake_case : Tuple = """multi_label_classification"""
if self.config.problem_type == "regression":
_snake_case : List[str] = MSELoss()
if self.num_labels == 1:
_snake_case : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze())
else:
_snake_case : List[str] = loss_fct(lowerCAmelCase , lowerCAmelCase)
elif self.config.problem_type == "single_label_classification":
_snake_case : Dict = CrossEntropyLoss()
_snake_case : int = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
_snake_case : Optional[int] = BCEWithLogitsLoss()
_snake_case : List[str] = loss_fct(lowerCAmelCase , lowerCAmelCase)
if not return_dict:
_snake_case : Optional[Any] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase , logits=lowerCAmelCase , hidden_states=outputs.hidden_states)
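# Hedged end-to-end sketch using the public transformers API this file implements
# (`image` is assumed to be a PIL image you already loaded):
# >>> from transformers import AutoImageProcessor, RegNetForImageClassification
# >>> processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
# >>> model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# >>> inputs = processor(images=image, return_tensors="pt")
# >>> logits = model(**inputs).logits  # shape (batch_size, num_labels)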
| 317 | 0 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1_337,
        post_processing_size=442,
        dataset_size=1_234,
        size_in_bytes=1_337 + 442 + 1_234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1_337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
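# Hedged sketch of the README.md header that DatasetInfosDict.write_to_directory
# produces, which is what test_from_dir parses back out above (field set may vary):
# ---
# dataset_info:
#   config_name: config
#   dataset_size: 42
# ---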
| 40 |
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))
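# Worked example for the two helpers above (values verified by hand):
# >>> prefix_function("aabcdaabc")
# [0, 1, 0, 0, 0, 1, 2, 3, 4]
# >>> longest_prefix("aabcdaabc")
# 4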
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 317 | 0 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
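# Hedged usage sketch: 3-coloring a 5-vertex adjacency-matrix graph with
# edges 0-1, 1-2, 1-4, 2-3 and 3-4 (result hand-traced):
# >>> graph = [
# ...     [0, 1, 0, 0, 0],
# ...     [1, 0, 1, 0, 1],
# ...     [0, 1, 0, 1, 0],
# ...     [0, 0, 1, 0, 1],
# ...     [0, 1, 0, 1, 0],
# ... ]
# >>> color(graph, 3)
# [0, 1, 0, 1, 0]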
| 119 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
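# Hedged note on make_linear_from_emb: assigning `.data` swaps the Linear weight
# tensor wholesale, so the returned head shares the embedding matrix itself.
# >>> emb = nn.Embedding(50265, 1024)  # hypothetical BART-large-sized dimensions
# >>> make_linear_from_emb(emb).weight.shape
# torch.Size([50265, 1024])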
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak model's weights to our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 317 | 0 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty
    lines.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 285 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 317 | 0 |
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the rightmost index of char in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Returns the text index of the rightmost mismatch for an alignment at
        current_pos, or -1 if the pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
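# Expected output for the sample above ("AB" occurs at indices 0 and 3 in "ABAABA";
# hand-traced through bad_character_heuristic):
# Pattern found in following positions:
# [0, 3]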
| 190 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 317 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]
if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 253 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 317 | 0 |
import math
def prime_sieve(n: int) -> list:
    """Returns a list of all primes below n, sieving over the odd numbers only."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
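# Hedged sanity check for the sieve helper above (verified by hand):
# >>> prime_sieve(30)
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]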
def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
| 232 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
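# Hedged sketch of the HashMap surface these tests exercise (getitem, setitem and
# delitem mirror the built-in dict):
# >>> hm = HashMap(initial_block_size=4)
# >>> hm["key_a"] = "val_a"
# >>> hm["key_a"]
# 'val_a'
# >>> del hm["key_a"]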
| 317 | 0 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
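# Hedged usage sketch for get_feature_extractor_config; the returned dict mirrors the
# checkpoint's preprocessor_config.json, so exact keys vary per model:
# >>> config = get_feature_extractor_config("facebook/wav2vec2-base-960h")
# >>> config.get("feature_extractor_type")
# 'Wav2Vec2FeatureExtractor'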
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 237 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests ( TestCasePlus ):
'''simple docstring'''
@require_torch
def test_offline_mode( self : str) -> str:
"""simple docstring"""
_snake_case : Optional[int] = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
_snake_case : Any = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
_snake_case : Dict = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
_snake_case : Dict = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(lowerCAmelCase)
BertModel.from_pretrained(lowerCAmelCase)
BertTokenizer.from_pretrained(lowerCAmelCase)
pipeline(task="""fill-mask""" , model=lowerCAmelCase)
# baseline - just load from_pretrained with normal network
_snake_case : int = [sys.executable, """-c""", """\n""".join([load, run, mock])]
# should succeed
_snake_case : Dict = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case : Union[str, Any] = """1"""
_snake_case : Tuple = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn("""success""" , result.stdout.decode())
@require_torch
def test_offline_mode_no_internet( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_snake_case : List[Any] = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
_snake_case : List[str] = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
_snake_case : int = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
_snake_case : int = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(lowerCAmelCase)
BertModel.from_pretrained(lowerCAmelCase)
BertTokenizer.from_pretrained(lowerCAmelCase)
pipeline(task="""fill-mask""" , model=lowerCAmelCase)
# baseline - just load from_pretrained with normal network
_snake_case : str = [sys.executable, """-c""", """\n""".join([load, run, mock])]
# should succeed
_snake_case : int = self.get_env()
_snake_case : List[str] = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn("""success""" , result.stdout.decode())
@require_torch
def test_offline_mode_sharded_checkpoint( self : Dict) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Union[str, Any] = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
_snake_case : List[Any] = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
_snake_case : Optional[int] = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
_snake_case : int = [sys.executable, """-c""", """\n""".join([load, run])]
# should succeed
_snake_case : Any = self.get_env()
_snake_case : Dict = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn("""success""" , result.stdout.decode())
# next emulate no network
_snake_case : List[Any] = [sys.executable, """-c""", """\n""".join([load, mock, run])]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case : int = """1"""
_snake_case : Any = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn("""success""" , result.stdout.decode())
@require_torch
def test_offline_mode_pipeline_exception( self : Any) -> Any:
"""simple docstring"""
_snake_case : Dict = """
from transformers import pipeline
"""
_snake_case : Any = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
_snake_case : List[str] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
_snake_case : Tuple = self.get_env()
_snake_case : Union[str, Any] = """1"""
_snake_case : int = [sys.executable, """-c""", """\n""".join([load, mock, run])]
_snake_case : Any = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 1 , result.stderr)
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""") , )
@require_torch
def test_offline_mode_custom_model( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_snake_case : Optional[Any] = """
from transformers import AutoModel
"""
_snake_case : Union[str, Any] = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
_snake_case : Any = [sys.executable, """-c""", """\n""".join([load, run])]
# should succeed
_snake_case : Union[str, Any] = self.get_env()
_snake_case : Tuple = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn("""success""" , result.stdout.decode())
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_snake_case : Union[str, Any] = """1"""
_snake_case : List[Any] = subprocess.run(lowerCAmelCase , env=lowerCAmelCase , check=lowerCAmelCase , capture_output=lowerCAmelCase)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn("""success""" , result.stdout.decode())
| 317 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
a_ = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
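# vocab.json + merges.txt is the GPT-2 style byte-level BPE file pair; MVP inherits
# this setup from its slow counterpart (MvpTokenizer, imported above), which follows
# the BART/GPT-2 byte-level BPE scheme (an assumption based on that lineage).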
a_ = {
'RUCAIBox/mvp': 1_024,
}
class _lowercase ( SCREAMING_SNAKE_CASE_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["""input_ids""", """attention_mask"""]
lowercase = MvpTokenizer
def __init__( self : Optional[int] , snake_case : List[Any]=None , snake_case : Optional[Any]=None , snake_case : Any=None , snake_case : Dict="replace" , snake_case : Dict="<s>" , snake_case : Dict="</s>" , snake_case : List[Any]="</s>" , snake_case : int="<s>" , snake_case : Dict="<unk>" , snake_case : Optional[int]="<pad>" , snake_case : Any="<mask>" , snake_case : int=False , snake_case : Dict=True , **snake_case : List[Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
UpperCamelCase_ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case ) != add_prefix_space:
UpperCamelCase_ : List[Any] = getattr(snake_case , pre_tok_state.pop('type' ) )
UpperCamelCase_ : Dict = add_prefix_space
UpperCamelCase_ : int = pre_tok_class(**snake_case )
UpperCamelCase_ : Tuple = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCamelCase_ : Tuple = """post_processor"""
UpperCamelCase_ : Any = getattr(self.backend_tokenizer , snake_case , snake_case )
if tokenizer_component_instance:
UpperCamelCase_ : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
if "sep" in state:
UpperCamelCase_ : Any = tuple(state['sep'] )
if "cls" in state:
UpperCamelCase_ : int = tuple(state['cls'] )
UpperCamelCase_ : Union[str, Any] = False
if state.get('add_prefix_space' , snake_case ) != add_prefix_space:
UpperCamelCase_ : Optional[int] = add_prefix_space
UpperCamelCase_ : List[Any] = True
if state.get('trim_offsets' , snake_case ) != trim_offsets:
UpperCamelCase_ : int = trim_offsets
UpperCamelCase_ : Union[str, Any] = True
if changes_to_apply:
UpperCamelCase_ : Dict = getattr(snake_case , state.pop('type' ) )
UpperCamelCase_ : Union[str, Any] = component_class(**snake_case )
setattr(self.backend_tokenizer , snake_case , snake_case )
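# The block above mirrors changed flags back into the serialized post-processor:
# add_prefix_space and trim_offsets live inside the tokenizers component state, so a
# new component instance must be rebuilt and re-attached whenever either flag differs.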
@property
def mask_token( self : int ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token( self : Any , value : List[Any] ) -> None:
"""simple docstring"""
UpperCamelCase_ : Any = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
UpperCamelCase_ : Any = value
def _batch_encode_plus( self : int , *snake_case : Dict , **snake_case : Optional[int] ) -> BatchEncoding:
"""simple docstring"""
UpperCamelCase_ : List[str] = kwargs.get('is_split_into_words' , snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case , **snake_case )
def _encode_plus( self : List[Any] , *snake_case : List[str] , **snake_case : Tuple ) -> BatchEncoding:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = kwargs.get('is_split_into_words' , snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case , **snake_case )
def save_vocabulary( self : List[Any] , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
UpperCamelCase_ : int = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
def build_inputs_with_special_tokens( self : List[Any] , snake_case : int , snake_case : str=None ) -> int:
"""simple docstring"""
UpperCamelCase_ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def create_token_type_ids_from_sequences( self : Tuple , snake_case : List[int] , snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase_ : str = [self.sep_token_id]
UpperCamelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
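# A minimal usage sketch (assumes network access to the RUCAIBox/mvp checkpoint):
#
#     tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     batch = tokenizer("Summarize: the quarterly report ...", return_tensors="pt")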
| 175 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def lowercase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
inspect_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : Union[str, Any] = path + """.py"""
assert script_name in os.listdir(SCREAMING_SNAKE_CASE__ )
assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE__ )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
inspect_metric(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = path + """.py"""
assert script_name in os.listdir(SCREAMING_SNAKE_CASE__ )
assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]:
_snake_case : Dict = get_dataset_config_info(SCREAMING_SNAKE_CASE__ , config_name=SCREAMING_SNAKE_CASE__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
get_dataset_config_info(SCREAMING_SNAKE_CASE__ , config_name=SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
_snake_case : Optional[Any] = get_dataset_config_names(SCREAMING_SNAKE_CASE__ )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def lowercase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
_snake_case : Union[str, Any] = get_dataset_infos(SCREAMING_SNAKE_CASE__ )
assert list(infos.keys() ) == expected_configs
_snake_case : Optional[int] = expected_configs[0]
assert expected_config in infos
_snake_case : int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def lowercase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
_snake_case : Dict = get_dataset_infos(SCREAMING_SNAKE_CASE__ )
assert expected_config in infos
_snake_case : Optional[int] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
get_dataset_split_names(SCREAMING_SNAKE_CASE__ , config_name=SCREAMING_SNAKE_CASE__ )
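# Every test above performs real Hub lookups, which is why the module is tagged with
# the pytest `integration` marker rather than running in the default unit-test suite.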
| 317 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class A__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
UpperCamelCase_ : Dict = """funnel"""
UpperCamelCase_ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
}
def __init__( self : Tuple , lowerCAmelCase__ : int=3_0_5_2_2 , lowerCAmelCase__ : List[str]=[4, 4, 4] , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : Optional[int]=7_6_8 , lowerCAmelCase__ : List[str]=1_2 , lowerCAmelCase__ : str=6_4 , lowerCAmelCase__ : int=3_0_7_2 , lowerCAmelCase__ : Tuple="gelu_new" , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Union[str, Any]=1e-9 , lowerCAmelCase__ : List[Any]="mean" , lowerCAmelCase__ : Dict="relative_shift" , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Optional[int]=True , **lowerCAmelCase__ : Tuple , ) -> int:
"""simple docstring"""
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : Optional[Any] = block_sizes
_UpperCAmelCase : Optional[Any] = [1] * len(lowerCAmelCase__ ) if block_repeats is None else block_repeats
assert len(lowerCAmelCase__ ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_UpperCAmelCase : Any = num_decoder_layers
_UpperCAmelCase : str = d_model
_UpperCAmelCase : Optional[Any] = n_head
_UpperCAmelCase : Optional[Any] = d_head
_UpperCAmelCase : Union[str, Any] = d_inner
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : List[Any] = hidden_dropout
_UpperCAmelCase : List[str] = attention_dropout
_UpperCAmelCase : Union[str, Any] = activation_dropout
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Optional[Any] = initializer_std
_UpperCAmelCase : List[str] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"""Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported."""
_UpperCAmelCase : Tuple = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"""Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported."""
_UpperCAmelCase : Dict = attention_type
_UpperCAmelCase : Tuple = separate_cls
_UpperCAmelCase : Dict = truncate_seq
_UpperCAmelCase : List[str] = pool_q_only
super().__init__(**lowerCAmelCase__ )
@property
def num_hidden_layers( self : Optional[Any] ) -> int:
"""simple docstring"""
return sum(self.block_sizes )
@num_hidden_layers.setter
def num_hidden_layers( self : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )
@property
def num_blocks( self : int ) -> int:
"""simple docstring"""
return len(self.block_sizes )
@num_blocks.setter
def num_blocks( self : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> List[Any]:
"""simple docstring"""
raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
| 145 |
import pprint
import requests
a__ = """https://zenquotes.io/api"""
def lowercase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def lowercase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
a__ = random_quotes()
pprint.pprint(response)
| 317 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Any = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _snake_case ( SCREAMING_SNAKE_CASE_ ):
_lowercase : int = """transfo-xl"""
_lowercase : str = ["""mems"""]
_lowercase : Union[str, Any] = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , a=26_7735 , a=[2_0000, 4_0000, 20_0000] , a=1024 , a=1024 , a=16 , a=64 , a=4096 , a=4 , a=False , a=18 , a=1600 , a=1000 , a=True , a=True , a=0 , a=-1 , a=True , a=0.1 , a=0.0 , a=True , a="normal" , a=0.01 , a=0.01 , a=0.02 , a=1E-5 , a=0 , **a , ) -> Any:
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = []
self.cutoffs.extend(a)
if proj_share_all_but_first:
SCREAMING_SNAKE_CASE = [False] + [True] * len(self.cutoffs)
else:
SCREAMING_SNAKE_CASE = [False] + [False] * len(self.cutoffs)
SCREAMING_SNAKE_CASE = d_model
SCREAMING_SNAKE_CASE = d_embed
SCREAMING_SNAKE_CASE = d_head
SCREAMING_SNAKE_CASE = d_inner
SCREAMING_SNAKE_CASE = div_val
SCREAMING_SNAKE_CASE = pre_lnorm
SCREAMING_SNAKE_CASE = n_layer
SCREAMING_SNAKE_CASE = n_head
SCREAMING_SNAKE_CASE = mem_len
SCREAMING_SNAKE_CASE = same_length
SCREAMING_SNAKE_CASE = attn_type
SCREAMING_SNAKE_CASE = clamp_len
SCREAMING_SNAKE_CASE = sample_softmax
SCREAMING_SNAKE_CASE = adaptive
SCREAMING_SNAKE_CASE = dropout
SCREAMING_SNAKE_CASE = dropatt
SCREAMING_SNAKE_CASE = untie_r
SCREAMING_SNAKE_CASE = init
SCREAMING_SNAKE_CASE = init_range
SCREAMING_SNAKE_CASE = proj_init_std
SCREAMING_SNAKE_CASE = init_std
SCREAMING_SNAKE_CASE = layer_norm_epsilon
super().__init__(eos_token_id=a , **a)
@property
def max_position_embeddings( self) -> int:
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
return -1
@max_position_embeddings.setter
def max_position_embeddings( self , a) -> Optional[int]:
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''')
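# Sketch: the default cutoffs [20000, 40000, 200000] partition the 267,735-token
# vocabulary for the adaptive softmax, and max_position_embeddings reports -1 above
# because Transformer-XL has no hard sequence-length limit.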
| 137 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ = logging.get_logger(__name__)
a__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class snake_case ( BackboneConfigMixin ,PretrainedConfig ):
'''simple docstring'''
snake_case_ : Optional[Any] = """swin"""
snake_case_ : Optional[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : str , lowerCAmelCase : Optional[int]=224 , lowerCAmelCase : int=4 , lowerCAmelCase : Any=3 , lowerCAmelCase : int=96 , lowerCAmelCase : Optional[Any]=[2, 2, 6, 2] , lowerCAmelCase : Optional[Any]=[3, 6, 12, 24] , lowerCAmelCase : Tuple=7 , lowerCAmelCase : List[Any]=4.0 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : Tuple="gelu" , lowerCAmelCase : Any=False , lowerCAmelCase : Union[str, Any]=0.02 , lowerCAmelCase : int=1E-5 , lowerCAmelCase : Optional[Any]=32 , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Dict=None , **lowerCAmelCase : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
_snake_case : int = image_size
_snake_case : Any = patch_size
_snake_case : Union[str, Any] = num_channels
_snake_case : int = embed_dim
_snake_case : Dict = depths
_snake_case : Dict = len(lowerCAmelCase)
_snake_case : Optional[Any] = num_heads
_snake_case : Tuple = window_size
_snake_case : int = mlp_ratio
_snake_case : Any = qkv_bias
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : List[str] = attention_probs_dropout_prob
_snake_case : Optional[Any] = drop_path_rate
_snake_case : List[Any] = hidden_act
_snake_case : str = use_absolute_embeddings
_snake_case : Tuple = layer_norm_eps
_snake_case : Any = initializer_range
_snake_case : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_snake_case : Dict = int(embed_dim * 2 ** (len(lowerCAmelCase) - 1))
_snake_case : Optional[Any] = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(lowerCAmelCase) + 1)]
_snake_case , _snake_case : List[str] = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = version.parse("""1.11""" )
@property
def inputs( self : Dict) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def atol_for_validation( self : Dict) -> float:
"""simple docstring"""
return 1E-4
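# Worked example: with the defaults above (embed_dim=96, depths=[2, 2, 6, 2]) the
# channel dimension after the last stage is 96 * 2 ** (4 - 1) = 768, which becomes the
# hidden_size attribute used for VisionEncoderDecoderModel compatibility.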
| 317 | 0 |
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
inspect_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ = path + """.py"""
assert script_name in os.listdir(SCREAMING_SNAKE_CASE__ )
assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE__ )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
inspect_metric(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ = path + """.py"""
assert script_name in os.listdir(SCREAMING_SNAKE_CASE__ )
assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_dataset_config_info(SCREAMING_SNAKE_CASE__ , config_name=SCREAMING_SNAKE_CASE__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
get_dataset_config_info(SCREAMING_SNAKE_CASE__ , config_name=SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_dataset_config_names(SCREAMING_SNAKE_CASE__ )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_dataset_infos(SCREAMING_SNAKE_CASE__ )
assert list(infos.keys() ) == expected_configs
SCREAMING_SNAKE_CASE__ = expected_configs[0]
assert expected_config in infos
SCREAMING_SNAKE_CASE__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_dataset_infos(SCREAMING_SNAKE_CASE__ )
assert expected_config in infos
SCREAMING_SNAKE_CASE__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def A ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
get_dataset_split_names(SCREAMING_SNAKE_CASE__ , config_name=SCREAMING_SNAKE_CASE__ )
| 165 |
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Tuple , *lowerCAmelCase : str , **lowerCAmelCase : Optional[Any]) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : List[Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[str]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : List[str] , **lowerCAmelCase : Any) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : str , **lowerCAmelCase : Any) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : List[Any] , **lowerCAmelCase : str) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : Dict , **lowerCAmelCase : int) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Any , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Dict , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : int , **lowerCAmelCase : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : List[str]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : Optional[int] , *lowerCAmelCase : Any , **lowerCAmelCase : Union[str, Any]) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Optional[Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : List[Any]) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : int , **lowerCAmelCase : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Any) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
def lowercase ( *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
def lowercase ( *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
def lowercase ( *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
def lowercase ( *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Optional[int]:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
def lowercase ( *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Dict ) -> int:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
def lowercase ( *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
def lowercase ( *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
requires_backends(SCREAMING_SNAKE_CASE__ , ["""torch"""] )
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Any , **lowerCAmelCase : Any) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Tuple) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Dict) -> Dict:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : str , **lowerCAmelCase : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Dict , **lowerCAmelCase : int) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : Optional[int] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Tuple) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[str] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : Dict) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : int) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Tuple = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : int , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Dict , **lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Any) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Dict) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : List[str] , **lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> Dict:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Any , **lowerCAmelCase : int) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[str] = ["""torch"""]
def __init__( self : Optional[int] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> List[str]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : List[str]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : str , **lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : int , **lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Any = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : str) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : Tuple) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Tuple = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : Dict , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : int) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Optional[int]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Any , **lowerCAmelCase : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Dict , **lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : List[str] , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Dict = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[str]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Tuple) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Any) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : List[str] , **lowerCAmelCase : int) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Any , **lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Any = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : int , **lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Any = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Any , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : str) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Any , **lowerCAmelCase : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Union[str, Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple) -> str:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Union[str, Any]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : Dict , **lowerCAmelCase : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Any , **lowerCAmelCase : List[Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : List[str] , **lowerCAmelCase : List[Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Dict) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : List[str] , **lowerCAmelCase : int) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Tuple) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Optional[Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[str] = ["""torch"""]
def __init__( self : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[str]) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : Optional[Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : int) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : Any , **lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Tuple) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : str , **lowerCAmelCase : Dict) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[str]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Dict = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : List[str] , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


# NOTE: the original class names in this generated dummy-objects module are not
# recoverable from this snippet, so distinct placeholder names are used below.
# The `DummyObject` metaclass / `requires_backends` pattern follows transformers'
# auto-generated `utils/dummy_pt_objects.py`.
class _DummyPtObjectA(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _DummyPtObjectB(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _DummyPtObjectC(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _DummyPtObjectD(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _DummyPtObjectE(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _DummyPtObjectF(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _DummyPtObjectG(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
| 317 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir) -> None:
    """Logs the metrics for the given split and saves them to `{split}_results.json`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main() -> dict:
    """Parses the arguments, then trains, evaluates and/or predicts with Seq2SeqTrainer."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
# use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = SeqaSeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index) -> None:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
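# Hypothetical invocation sketch (checkpoint, data and output paths are
# assumptions, not taken from this file):
#
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/tiny-mbart \
#       --data_dir ./wmt_en_ro \
#       --output_dir ./out \
#       --do_train --do_eval --predict_with_generate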
| 40 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    """Configuration class for an EfficientNet model."""

    model_type = "efficientnet"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
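# Hedged usage sketch (defaults come from the config class above; the demo
# function is illustrative and is not executed on import):
def _demo_efficientnet_config() -> None:
    config = EfficientNetConfig()                 # EfficientNet-B7-style defaults
    onnx_config = EfficientNetOnnxConfig(config)  # OnnxConfig takes the model config
    print(config.image_size)                      # 600
    print(dict(onnx_config.inputs))               # {"pixel_values": {0: "batch", ...}}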
| 317 | 0 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
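# Hedged usage sketch: the functions above are torch.hub entry points, so
# callers go through torch.hub.load (the checkpoint name is an illustrative
# assumption):
#
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")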
| 119 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    """Task template for extractive question answering."""

    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
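# Hedged usage sketch (column names are illustrative assumptions; the demo
# function is not executed on import):
def _demo_task_template() -> None:
    task = QuestionAnsweringExtractive(question_column="query", context_column="passage")
    # `column_mapping` describes how to rename the user's columns back to the
    # canonical "question"/"context"/"answers" schema.
    print(task.column_mapping)  # {"query": "question", "passage": "context", "answers": "answers"}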
| 317 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 285 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
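# Note on the pattern above (a sketch, assuming the standard transformers layout):
# the `_LazyModule` placed into `sys.modules` defers each submodule import until a
# name listed in `_import_structure` is first accessed, e.g.
#
#   from transformers.models.wav2vec2 import Wav2Vec2Config  # config only, no torch import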
| 317 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 190 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )

    def read(self):
        # Build streaming dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written

    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                    written += file_obj.write(json_str)
        return written
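# Hedged usage sketch (file names are illustrative assumptions; `lines=True`
# is forwarded to pandas through `to_json_kwargs`; not executed on import):
def _demo_json_roundtrip() -> None:
    ds = JsonDatasetReader("data.jsonl").read()
    JsonDatasetWriter(ds, "copy.jsonl", lines=True).write()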
| 317 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a pair of `DataLoader`s for the GLUE MRPC dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
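# Hypothetical launch sketch (flag values are assumptions, not from this file):
#
#   accelerate launch local_sgd_example.py \
#       --mixed_precision fp16 \
#       --gradient_accumulation_steps 2 \
#       --local_sgd_steps 8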
| 253 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """Returns full log-probabilities when `labels` is None, else the NLL of the targets."""
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
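# Hedged usage sketch (vocabulary size, cutoffs and shapes are illustrative;
# the demo function is not executed on import):
def _demo_adaptive_logsoftmax() -> None:
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500])
    hidden = torch.randn(8, 64)   # 8 positions, 64-dim hidden states
    logprobs = crit(hidden)       # labels=None -> full (8, 1000) log-probabilities
    assert logprobs.shape == (8, 1000)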
| 317 | 0 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """
    Converts a string of Roman numerals into an integer, handling subtractive
    pairs (e.g. "XCIX" -> 99) by looking one symbol ahead.
    """
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    """
    Generates the shortest (canonical) Roman numeral string for a given
    integer (e.g. 99 -> "XCIX").
    """
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
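# Hedged round-trip check (values are illustrative; the demo function is not
# executed on import):
def _demo_roman_roundtrip() -> None:
    assert parse_roman_numerals("MCMXCIV") == 1994
    assert generate_roman_numerals(1994) == "MCMXCIV"
    # Project Euler 89 measures exactly this kind of shortening:
    assert generate_roman_numerals(parse_roman_numerals("IIIIIIIIII")) == "X"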
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """
    Converts each numeral in the input file to its shortest equivalent form
    and returns the total number of characters saved.
    """
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings
print(f"{solution() = }")
| 232 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        """
        Forwards the `images` argument to the image processor and the `audio`
        argument to the feature extractor, merging the results into one dict.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
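# Hedged usage sketch (the TVLT classes are assumed importable from transformers;
# the demo function is not executed on import):
def _demo_tvlt_processor() -> None:
    from transformers import TvltFeatureExtractor, TvltImageProcessor

    processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
    print(processor.model_input_names)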
| 317 | 0 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str):
    """Splits a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
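# Hedged example (illustrative input; the demo function is not executed on import):
def _demo_camel_case_split() -> None:
    assert camel_case_split("TFBertModel") == ["TF", "Bert", "Model"]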
def get_frameworks_table():
    """Builds a table mapping each model type to the frameworks and processor class that support it."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))
        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"
        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Any =parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
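# Example invocations (sketch; run from the transformers repository root):
#   python utils/update_metadata.py --check-only
#   python utils/update_metadata.py --token <HF_TOKEN> --commit_sha <COMMIT_SHA>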
| 237 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
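# Minimal usage sketch (assumption: the "Va" suffix above is an automated rename of
# transformers' MobileNetV1ImageProcessor; kwargs mirror the tester defaults):
#
#   import numpy as np
#   from PIL import Image
#   from transformers import MobileNetV1ImageProcessor
#
#   processor = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
#   image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
#   pixel_values = processor(image, return_tensors="pt").pixel_values
#   assert tuple(pixel_values.shape) == (1, 3, 18, 18)  # resize shortest edge, then center crop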
| 317 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
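# Usage sketch for the processor above (illustrative names; with both modalities
# given, the tokenizer's BatchEncoding simply gains a `pixel_values` entry):
#
#   processor = AltCLIPProcessor(image_processor=clip_image_processor, tokenizer=xlmr_tokenizer)
#   batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
#   sorted(batch.keys())  # -> ['attention_mask', 'input_ids', 'pixel_values']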
| 175 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
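# Quick sanity sketch (assumption: the class above corresponds to transformers'
# XLMRobertaConfig, as the archive map and model_type suggest; run in user code,
# not inside this module):
#
#   from transformers import XLMRobertaConfig
#   config = XLMRobertaConfig()
#   assert config.model_type == "xlm-roberta"
#   assert (config.vocab_size, config.hidden_size) == (30522, 768)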
| 317 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
def __init__( self : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str]=1_3 , lowerCAmelCase__ : List[str]=7 , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : List[str]=False , lowerCAmelCase__ : int=9_9 , lowerCAmelCase__ : Any=1_6 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : Union[str, Any]=4 , lowerCAmelCase__ : Optional[int]=4 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : int=3_2 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : List[str]=1 , lowerCAmelCase__ : Optional[Any]=0 , lowerCAmelCase__ : int=0.02 , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Union[str, Any] = batch_size
_UpperCAmelCase : List[str] = seq_length
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : Union[str, Any] = use_labels
_UpperCAmelCase : int = vocab_size
_UpperCAmelCase : Dict = hidden_size
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : int = max_position_embeddings
_UpperCAmelCase : Optional[Any] = eos_token_id
_UpperCAmelCase : Optional[int] = pad_token_id
_UpperCAmelCase : Any = bos_token_id
_UpperCAmelCase : int = initializer_range
def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_UpperCAmelCase : Optional[int] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_UpperCAmelCase : int = shift_tokens_right(lowerCAmelCase__ , 1 , 2 )
_UpperCAmelCase : int = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , )
_UpperCAmelCase : Dict = prepare_blenderbot_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def _lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : int = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Any ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = 2_0
_UpperCAmelCase : Union[str, Any] = model_class_name(lowerCAmelCase__ )
_UpperCAmelCase : int = model.encode(inputs_dict["input_ids"] )
_UpperCAmelCase : Tuple = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
_UpperCAmelCase : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase : List[str] = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_UpperCAmelCase : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_UpperCAmelCase : str = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase__ , )
_UpperCAmelCase : int = model.decode(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = 2_0
_UpperCAmelCase : str = model_class_name(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = model.encode(inputs_dict["input_ids"] )
_UpperCAmelCase : Union[str, Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase : int = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_UpperCAmelCase : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
_UpperCAmelCase : List[str] = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_UpperCAmelCase : Optional[Any] = model.decode(lowerCAmelCase__ , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
    vocab_size = 99
def _lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_UpperCAmelCase : str = input_ids.shape[0]
_UpperCAmelCase : Dict = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
_UpperCAmelCase : int = self._get_config_and_data()
_UpperCAmelCase : Optional[Any] = FlaxBlenderbotSmallForConditionalGeneration(lowerCAmelCase__ )
_UpperCAmelCase : Dict = lm_model(input_ids=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_UpperCAmelCase : Tuple = FlaxBlenderbotSmallForConditionalGeneration(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_UpperCAmelCase : Union[str, Any] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_UpperCAmelCase : Any = lm_model(input_ids=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , lowerCAmelCase__ )
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
def _lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : Any = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = model_class(lowerCAmelCase__ )
@jax.jit
def encode_jitted(lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : str ):
return model.encode(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase : Union[str, Any] = encode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase : Optional[int] = encode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : List[Any] = model_class(lowerCAmelCase__ )
_UpperCAmelCase : Any = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
_UpperCAmelCase : Tuple = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] ):
return model.decode(
decoder_input_ids=lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , encoder_outputs=lowerCAmelCase__ , )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase : Tuple = decode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase : int = decode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class_name.from_pretrained("facebook/blenderbot_small-90M" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_UpperCAmelCase : List[Any] = np.ones((1, 1) ) * model.config.eos_token_id
_UpperCAmelCase : Optional[int] = model(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
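# Standalone numpy sketch of the `shift_tokens_right` behavior asserted above:
# prepend the decoder start token, drop the last position, and map any -100
# label padding back to the pad token (mirrors the Flax implementation).
import numpy as np  # noqa: E402 (already imported above; repeated for self-containment)

def _shift_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)

assert _shift_right_sketch(
    np.array([[71, 82, 18, 33, 2, 1, 1]]), pad_token_id=1, decoder_start_token_id=2
).tolist() == [[2, 71, 82, 18, 33, 2, 1]]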
| 145 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""
snake_case_ : str = ","
snake_case_ : Optional[str] = None
snake_case_ : Optional[Union[int, List[int], str]] = "infer"
snake_case_ : Optional[List[str]] = None
snake_case_ : Optional[List[str]] = None
snake_case_ : Optional[Union[int, str, List[int], List[str]]] = None
snake_case_ : Optional[Union[List[int], List[str]]] = None
snake_case_ : Optional[str] = None
snake_case_ : bool = True
snake_case_ : Optional[Literal["c", "python", "pyarrow"]] = None
snake_case_ : Dict[Union[int, str], Callable[[Any], Any]] = None
snake_case_ : Optional[list] = None
snake_case_ : Optional[list] = None
snake_case_ : bool = False
snake_case_ : Optional[Union[int, List[int]]] = None
snake_case_ : Optional[int] = None
snake_case_ : Optional[Union[str, List[str]]] = None
snake_case_ : bool = True
snake_case_ : bool = True
snake_case_ : bool = False
snake_case_ : bool = True
snake_case_ : Optional[str] = None
snake_case_ : str = "."
snake_case_ : Optional[str] = None
snake_case_ : str = '"'
snake_case_ : int = 0
snake_case_ : Optional[str] = None
snake_case_ : Optional[str] = None
snake_case_ : Optional[str] = None
snake_case_ : Optional[str] = None
snake_case_ : bool = True
snake_case_ : bool = True
snake_case_ : int = 0
snake_case_ : bool = True
snake_case_ : bool = False
snake_case_ : Optional[str] = None
snake_case_ : int = 1_00_00
snake_case_ : Optional[datasets.Features] = None
snake_case_ : Optional[str] = "strict"
snake_case_ : Literal["error", "warn", "skip"] = "error"
snake_case_ : Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
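# Usage sketch: this builder is what `load_dataset("csv", ...)` dispatches to, and
# every CsvConfig field above is forwarded to pandas.read_csv. In user code:
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, split="train", sep=";")
#   ds.column_names  # column headers inferred from the CSV header row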
| 317 | 0 |
def solution(limit=1_000_000):
    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # Euler's product formula: phi(n) = n * prod(1 - 1/p) over primes p dividing n.
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
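# Sanity check: phi(2) .. phi(8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21.
assert solution(8) == 21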
if __name__ == "__main__":
print(f"""{solution() = }""")
| 137 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string)."
        )
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
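# Worked round-trip for the functions above: the sorted rotations of "banana"
# end with the characters "nnbaaa", and the original string sits at index 3.
assert bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3}
assert reverse_bwt("nnbaaa", 3) == "banana"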
| 317 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
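# Minimal standalone sketch of the lazy-module pattern used above (illustrative,
# not transformers' actual _LazyModule): attribute access triggers the real
# import, so importing the package stays cheap until a symbol is touched.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Only called for attributes not found normally: resolve and import lazily.
        for module_name, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(module_name), attr)
        raise AttributeError(attr)


_demo = _TinyLazyModule("demo", {"collections": ["OrderedDict"]})
assert _demo.OrderedDict is importlib.import_module("collections").OrderedDict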
| 165 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
a__ = logging.get_logger(__name__)
# General docstring
a__ = """RegNetConfig"""
# Base docstring
a__ = """facebook/regnet-y-040"""
a__ = [1, 10_88, 7, 7]
# Image classification docstring
a__ = """facebook/regnet-y-040"""
a__ = """tabby, tabby cat"""
a__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 1 , lowerCAmelCase : Optional[str] = "relu" , ) -> List[str]:
"""simple docstring"""
super().__init__()
_snake_case : int = nn.Convad(
lowerCAmelCase , lowerCAmelCase , kernel_size=lowerCAmelCase , stride=lowerCAmelCase , padding=kernel_size // 2 , groups=lowerCAmelCase , bias=lowerCAmelCase , )
_snake_case : List[Any] = nn.BatchNormad(lowerCAmelCase)
_snake_case : Tuple = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
_snake_case : Tuple = self.convolution(lowerCAmelCase)
_snake_case : Any = self.normalization(lowerCAmelCase)
_snake_case : List[Any] = self.activation(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : RegNetConfig) -> List[str]:
"""simple docstring"""
super().__init__()
_snake_case : Dict = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act)
_snake_case : Dict = config.num_channels
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
_snake_case : str = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""")
_snake_case : Any = self.embedder(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 2) -> Optional[Any]:
"""simple docstring"""
super().__init__()
_snake_case : Optional[Any] = nn.Convad(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , stride=lowerCAmelCase , bias=lowerCAmelCase)
_snake_case : Tuple = nn.BatchNormad(lowerCAmelCase)
def UpperCamelCase_ ( self : int , lowerCAmelCase : Tensor) -> Tensor:
"""simple docstring"""
_snake_case : Optional[Any] = self.convolution(lowerCAmelCase)
_snake_case : Optional[int] = self.normalization(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : int , lowerCAmelCase : int) -> Any:
"""simple docstring"""
super().__init__()
_snake_case : Optional[Any] = nn.AdaptiveAvgPoolad((1, 1))
_snake_case : Optional[Any] = nn.Sequential(
nn.Convad(lowerCAmelCase , lowerCAmelCase , kernel_size=1) , nn.ReLU() , nn.Convad(lowerCAmelCase , lowerCAmelCase , kernel_size=1) , nn.Sigmoid() , )
def UpperCamelCase_ ( self : Any , lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
_snake_case : Dict = self.pooler(lowerCAmelCase)
_snake_case : List[str] = self.attention(lowerCAmelCase)
_snake_case : str = hidden_state * attention
return hidden_state
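# Standalone shape sketch of the squeeze-and-excitation block above (illustrative
# sizes; the 1x1 conv bottleneck is in_channels // 4, matching the RegNetYLayer
# below): global-average-pool to (B, C, 1, 1), squeeze, excite, then gate the input.
import torch
from torch import nn as _nn_sketch

_x = torch.randn(2, 32, 7, 7)
_pooled = _nn_sketch.AdaptiveAvgPool2d((1, 1))(_x)  # (2, 32, 1, 1)
_gate = _nn_sketch.Sequential(
    _nn_sketch.Conv2d(32, 8, kernel_size=1), _nn_sketch.ReLU(),
    _nn_sketch.Conv2d(8, 32, kernel_size=1), _nn_sketch.Sigmoid(),
)(_pooled)
assert (_x * _gate).shape == _x.shape  # per-channel gates broadcast over H, W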
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 1) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
_snake_case : Optional[int] = in_channels != out_channels or stride != 1
_snake_case : Optional[Any] = max(1 , out_channels // config.groups_width)
_snake_case : Union[str, Any] = (
RegNetShortCut(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase) if should_apply_shortcut else nn.Identity()
)
_snake_case : Tuple = nn.Sequential(
RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , groups=lowerCAmelCase , activation=config.hidden_act) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=lowerCAmelCase) , )
_snake_case : Dict = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Union[str, Any] = hidden_state
_snake_case : int = self.layer(lowerCAmelCase)
_snake_case : Dict = self.shortcut(lowerCAmelCase)
hidden_state += residual
_snake_case : str = self.activation(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 1) -> Optional[Any]:
"""simple docstring"""
super().__init__()
_snake_case : int = in_channels != out_channels or stride != 1
_snake_case : Dict = max(1 , out_channels // config.groups_width)
_snake_case : Tuple = (
RegNetShortCut(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase) if should_apply_shortcut else nn.Identity()
)
_snake_case : Dict = nn.Sequential(
RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , groups=lowerCAmelCase , activation=config.hidden_act) , RegNetSELayer(lowerCAmelCase , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=lowerCAmelCase) , )
_snake_case : Optional[Any] = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Optional[int] , lowerCAmelCase : List[Any]) -> Tuple:
"""simple docstring"""
_snake_case : Tuple = hidden_state
_snake_case : List[Any] = self.layer(lowerCAmelCase)
_snake_case : List[str] = self.shortcut(lowerCAmelCase)
hidden_state += residual
_snake_case : int = self.activation(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 2 , ) -> int:
"""simple docstring"""
super().__init__()
_snake_case : Optional[Any] = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
_snake_case : Optional[int] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , ) , *[layer(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase) for _ in range(depth - 1)] , )
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
_snake_case : List[str] = self.layers(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : RegNetConfig) -> List[str]:
"""simple docstring"""
super().__init__()
_snake_case : Dict = nn.ModuleList([])
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
_snake_case : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(lowerCAmelCase , config.depths[1:]):
self.stages.append(RegNetStage(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , depth=lowerCAmelCase))
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : Tensor , lowerCAmelCase : bool = False , lowerCAmelCase : bool = True) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
_snake_case : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case : Optional[int] = hidden_states + (hidden_state,)
_snake_case : Dict = stage_module(lowerCAmelCase)
if output_hidden_states:
_snake_case : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase , hidden_states=lowerCAmelCase)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = RegNetConfig
snake_case_ : List[Any] = """regnet"""
snake_case_ : Any = """pixel_values"""
snake_case_ : Optional[Any] = True
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : List[str]) -> List[Any]:
"""simple docstring"""
if isinstance(lowerCAmelCase , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""")
elif isinstance(lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=False) -> Optional[int]:
"""simple docstring"""
if isinstance(lowerCAmelCase , lowerCAmelCase):
_snake_case : Optional[Any] = value
a__ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
a__ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self , config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward(self , pixel_values: Tensor , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self , config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward(self , pixel_values: Optional[torch.FloatTensor] = None , labels: Optional[torch.LongTensor] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze())
                else:
                    loss = loss_fct(logits , labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states)
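# --- Editor's example (not part of the original file) ---
# Minimal usage sketch for the classification model above. The checkpoint name
# "facebook/regnet-y-040" and the local image path are assumptions; swap in any
# RegNet checkpoint and image you have.
#
# from PIL import Image
# from transformers import AutoImageProcessor, RegNetForImageClassification
#
# processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
# model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
# logits = model(**inputs).logits
# print(model.config.id2label[logits.argmax(-1).item()])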
| 317 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( A_ = 4 )-> list[list[int]]:
'''simple docstring'''
a : Any = abs(SCREAMING_SNAKE_CASE__ ) or 4
return [[1 + x + y * row_size for x in range(SCREAMING_SNAKE_CASE__ )] for y in range(SCREAMING_SNAKE_CASE__ )]
def lowercase ( A_ )-> list[list[int]]:
'''simple docstring'''
return reverse_row(transpose(SCREAMING_SNAKE_CASE__ ) )
# OR.. transpose(reverse_column(matrix))
def lowercase ( A_ )-> list[list[int]]:
'''simple docstring'''
return reverse_row(reverse_column(SCREAMING_SNAKE_CASE__ ) )
# OR.. reverse_column(reverse_row(matrix))
def lowercase ( A_ )-> list[list[int]]:
'''simple docstring'''
return reverse_column(transpose(SCREAMING_SNAKE_CASE__ ) )
# OR.. transpose(reverse_row(matrix))
def lowercase ( A_ )-> list[list[int]]:
'''simple docstring'''
a : str = [list(SCREAMING_SNAKE_CASE__ ) for x in zip(*SCREAMING_SNAKE_CASE__ )]
return matrix
def lowercase ( A_ )-> list[list[int]]:
'''simple docstring'''
a : Dict = matrix[::-1]
return matrix
def lowercase ( A_ )-> list[list[int]]:
'''simple docstring'''
a : int = [x[::-1] for x in matrix]
return matrix
def lowercase ( A_ )-> None:
'''simple docstring'''
for i in matrix:
print(*SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__lowercase = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 90 counterclockwise:\n""")
print_matrix(rotate_aa(matrix))
__lowercase = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 180:\n""")
print_matrix(rotate_aaa(matrix))
__lowercase = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 270 counterclockwise:\n""")
print_matrix(rotate_aaa(matrix))
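
    # --- Editor's addition: quick self-check (not in the original) ---
    # Rotating 90 degrees four times, or 180 degrees twice, must reproduce the
    # original matrix; none of the helpers mutate their input.
    matrix = make_matrix()
    assert rotate_90(rotate_90(rotate_90(rotate_90(matrix)))) == make_matrix()
    assert rotate_180(rotate_180(make_matrix())) == make_matrix()
    print("\nself-check passed")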
| 40 |
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1 , len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    return max(prefix_function(input_string))
if __name__ == "__main__":
import doctest
doctest.testmod()
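
# --- Editor's example (not in the original) ---
# For "aabcdaabc" the prefix function is [0, 1, 0, 0, 0, 1, 2, 3, 4]: the
# suffix "aabc" of the full string matches its 4-character prefix.
if __name__ == "__main__":
    assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert longest_prefix("aabcdaabc") == 4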
| 317 | 0 |
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)) , n[i : i + 13]))
        for i in range(len(n) - 12))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 119 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
a__ = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
a__ = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = """ Hello world! cécé herlolip"""
a__ = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def remove_ignore_keys_(state_dict: dict) -> None:
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None)


def rename_key(dct: dict , old: str , new: str) -> None:
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path: str):
    sd = torch.load(checkpoint_path , map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq" , "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path: str , pytorch_dump_folder_path: str , hf_checkpoint_name: str = None) -> None:
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq" , checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace("." , "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT , return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens , tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}")

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli" , tokens , return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model , "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}")
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
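
    # --- Editor's note (not in the original) ---
    # Typical invocation, assuming fairseq can download "bart.large.cnn"; the
    # script filename below is the upstream name and is an assumption here:
    #   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
    #       bart.large.cnn ./hf-bart-large-cnn --hf_config facebook/bart-large-cnn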
| 317 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 285 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a__ = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self , *args , **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs)
| 317 | 0 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0 , len(data) - 1)
        b = random.randint(0 , len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
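
    # --- Editor's addition (not in the original) ---
    # Note: the loop above performs random transpositions, which is NOT the
    # classic Fisher-Yates algorithm and does not yield perfectly uniform
    # permutations. A minimal sketch of the unbiased backward variant:
    def unbiased_fisher_yates(data: list) -> list:
        for i in range(len(data) - 1, 0, -1):
            j = random.randint(0, i)  # pick only from the not-yet-fixed prefix
            data[i], data[j] = data[j], data[i]
        return data

    print("Unbiased FY Shuffle", unbiased_fisher_yates(list(range(8))))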
| 190 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
a__ = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self , *args , **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs)
| 317 | 0 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 253 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
a__ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self : List[Any] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Dict) -> None:
"""simple docstring"""
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , lowerCAmelCase , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase)
| 317 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 232 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k , v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj , fun , *args):
    try:
        return fun(obj , *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my , fun , *args)
        py_res, py_exc = _run_operation(py , fun , *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
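
# --- Editor's example (not in the original) ---
# Direct usage sketch of the HashMap under test; it mirrors the dict protocol
# exercised by the parametrized operations above (setitem/getitem/delitem/len).
def _demo() -> None:
    hm = HashMap(initial_block_size=4)
    hm["key_a"] = "val_a"
    assert hm["key_a"] == "val_a"
    del hm["key_a"]
    assert len(hm) == 0


if __name__ == "__main__":
    _demo()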
| 317 | 0 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1 , input_2: int = 1 , carry_in: int = 1):
    if (
        isinstance(input_1 , str)
        or isinstance(input_2 , str)
        or isinstance(carry_in , str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4 , "qr")
    cr = qiskit.ClassicalRegister(2 , "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr)
    for i in range(0 , 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3)  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1)
    quantum_circuit.ccx(1 , 2 , 3)
    quantum_circuit.cx(1 , 2)
    quantum_circuit.cx(0 , 1)
    quantum_circuit.measure([2, 3] , cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit , backend , shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 237 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask" , model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd , env=env , check=False , capture_output=True)
        self.assertEqual(result.returncode , 0 , result.stderr)
        self.assertIn("success" , result.stdout.decode())
@require_torch
    def test_offline_mode_no_internet(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask" , model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True)
        self.assertEqual(result.returncode , 0 , result.stderr)
        self.assertIn("success" , result.stdout.decode())
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True)
        self.assertEqual(result.returncode , 0 , result.stderr)
        self.assertIn("success" , result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd , env=env , check=False , capture_output=True)
        self.assertEqual(result.returncode , 0 , result.stderr)
        self.assertIn("success" , result.stdout.decode())
@require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd , env=env , check=False , capture_output=True)
        self.assertEqual(result.returncode , 1 , result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "") , )
@require_torch
    def test_offline_mode_custom_model(self):
        load = """
from transformers import AutoModel
"""
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True)
        self.assertEqual(result.returncode , 0 , result.stderr)
        self.assertIn("success" , result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd , env=env , check=False , capture_output=True)
        self.assertEqual(result.returncode , 0 , result.stderr)
        self.assertIn("success" , result.stdout.decode())
| 317 | 0 |
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number , int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2 , int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status , bool), "'status' must been from type bool"

    return status


def sieve_er(n: int) -> list:
    assert isinstance(n , int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1 , len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans , list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    assert isinstance(n , int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans , list), "'ans' must been from type list"

    return ans
def prime_factorization(number: int) -> list:
    assert isinstance(number , int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans , list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    assert isinstance(number , int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans , int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    assert isinstance(number , int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans , int), "'ans' must been from type int"

    return ans


def is_even(number: int) -> bool:
    assert isinstance(number , int), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number , int), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool), "compare bust been from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list:
    assert (
        isinstance(number , int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans , list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number_1: int , number_2: int) -> int:
    assert (
        isinstance(number_1 , int)
        and isinstance(number_2 , int)
        and (number_1 >= 0)
        and (number_2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number_2 != 0:
        rest = number_1 % number_2
        number_1 = number_2
        number_2 = rest

    # precondition
    assert isinstance(number_1 , int) and (
        number_1 >= 0
    ), "'number' must been from type int and positive"

    return number_1
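
# --- Editor's example (not in the original) ---
# Euclid's algorithm as implemented above:
#   gcd(24, 36): 24 % 36 = 24 -> (36, 24); 36 % 24 = 12 -> (24, 12);
#   24 % 12 = 0 -> (12, 0); answer 12.
#
# assert gcd(24, 36) == 12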
def kg_v(number_1: int , number_2: int) -> int:
    assert (
        isinstance(number_1 , int)
        and isinstance(number_2 , int)
        and (number_1 >= 1)
        and (number_2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number_1 > 1 and number_2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number_1)
        prime_fac_2 = prime_factorization(number_2)
    elif number_1 == 1 or number_2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number_1 , number_2)

    count_1 = 0
    count_2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count_1 = prime_fac_1.count(n)
                count_2 = prime_fac_2.count(n)
                for _ in range(max(count_1 , count_2)):
                    ans *= n
            else:
                count_1 = prime_fac_1.count(n)
                for _ in range(count_1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count_2 = prime_fac_2.count(n)
            for _ in range(count_2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans , int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans


def get_prime(n: int) -> int:
    assert isinstance(n , int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans , int) and is_prime(
        ans), "'ans' must been a prime number and from type int"

    return ans
def get_primes_between(p_number_1: int , p_number_2: int) -> list:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans , list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    assert isinstance(n , int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1 , n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    assert isinstance(number , int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors , list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int , denominator: int) -> tuple:
    assert (
        isinstance(numerator , int)
        and isinstance(denominator , int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator) , abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction , int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    assert isinstance(n , int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1 , n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    assert isinstance(n , int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
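
# --- Editor's examples (not in the original) ---
# Quick sanity checks for the helpers above; fib uses the 1-based variant
# where fib(0) == fib(1) == 1.
if __name__ == "__main__":
    assert factorial(5) == 120
    assert fib(10) == 89
    assert kg_v(8, 10) == 40
    assert goldbach(28) == [5, 23]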
| 175 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path , tmp_path):
    inspect_dataset(path , tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path , tmp_path):
    inspect_metric(path , tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path , config_name , expected_splits):
    info = get_dataset_config_info(path , config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path , config_name , expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path , config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path , expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path , expected_configs , expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info_single_config(path , expected_config , expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path , config_name , expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path , config_name=config_name)
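
# --- Editor's example (not in the original) ---
# The inspected helpers compose naturally; e.g. enumerating every split of
# every config of a hub dataset (network access required):
#
# for config in get_dataset_config_names("paws"):
#     print(config, get_dataset_split_names("paws", config_name=config))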
| 317 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(self , vocab_size=50358 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
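
# --- Editor's example (not in the original) ---
# Instantiating the config with defaults, then overriding a single field:
#
# config = BertGenerationConfig(hidden_dropout_prob=0.2)
# assert config.vocab_size == 50358 and config.hidden_dropout_prob == 0.2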
| 145 |
import pprint
import requests
a__ = """https://zenquotes.io/api"""
def lowercase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def lowercase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 317 | 0 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset") , "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm')
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn' , 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense')
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace('head' , 'classifier')
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict , model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'
                ] = val[
                    :dim
                ]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'
                ] = val[
                    dim : dim * 2
                ]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'
                ] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name , pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name , pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict() , model)
    model.load_state_dict(new_state_dict)

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'

    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-')))
    image = Image.open(requests.get(url , stream=True).raw)
    inputs = image_processor(images=image , return_tensors='pt')

    timm_outs = timm_model(inputs['pixel_values'])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs , hf_outs , atol=1e-3)

    print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)

    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
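
    # --- Editor's note (not in the original) ---
    # Example invocation (timm downloads the pretrained weights); the script
    # filename below is the upstream name and is an assumption here:
    #   python convert_swin_timm_to_pytorch.py \
    #       --swin_name swin_tiny_patch4_window7_224 \
    #       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224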
| 137 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ = logging.get_logger(__name__)
a__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    r"""Configuration class storing the architecture of a Swin Transformer model."""

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 317 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
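# Positional-embedding weights are expected to be absent from the converted state
# dict: the transformers implementation constructs its sinusoidal positions itself,
# so the Audiocraft copy is deliberately skipped rather than treated as an error.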
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Map an Audiocraft decoder parameter name to its transformers equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
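

# Worked example (hypothetical key, applying the rules above in order):
#   rename_keys("transformer.layers.0.linear1.weight")
#   -> "model.decoder.layers.0.fc1.weight"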
def rename_state_dict(state_dict, hidden_size):
    """Rename the fairseq state dict keys and pull out the encoder-decoder projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
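

# The fused `in_proj_weight` has shape (3 * hidden_size, hidden_size): Audiocraft packs
# the query, key and value projections into one matrix, while the transformers attention
# modules keep separate `q_proj`/`k_proj`/`v_proj` linears, hence the row-wise split above.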
def decoder_config_from_checkpoint(checkpoint):
    """Build a MusicgenDecoderConfig matching one of the released checkpoint sizes."""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
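

# For reference: decoder_config_from_checkpoint("small") yields hidden_size=1024,
# 24 layers and 16 heads; "medium" and "large" scale those up as encoded above,
# with ffn_dim fixed at four times the hidden size in every case.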
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
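

# Once converted, the checkpoint can be used for generation roughly like this
# (the prompt text and token budget are illustrative):
#   inputs = processor(text=["80s pop track with bassy drums"], padding=True, return_tensors="pt")
#   audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)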
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
| 165 |
from ..utils import DummyObject, requires_backends


# This is a `dummy_pt_objects.py`-style placeholder module: every torch-backed public
# class is stubbed out so that importing it without torch installed raises a helpful
# error. The original file defines one such stub per class, each under its real name;
# the distinct names are lost in this copy (every stub is named `snake_case`), so a
# single representative stub, with a hypothetical placeholder name, stands in for the
# whole set.
class DummyPTObject(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Module-level callables get the same treatment: one wrapper per torch-backed function,
# again with distinct names in the original that are collapsed to a single hypothetical
# placeholder here.
def dummy_pt_function(*args, **kwargs):
    requires_backends(dummy_pt_function, ["torch"])
| 317 | 0 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
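
# The tester below follows the standard transformers test layout: it builds a tiny
# random config plus matching inputs, and each `create_and_check_*` method wires one
# task head to them and asserts only on output shapes (cheap, deterministic checks).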
class SqueezeBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 40 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    r"""Configuration class storing the architecture of an EfficientNet model."""

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 317 | 0 |
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with Pascal's rule, using a single 1-D table."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
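# Sanity check: C(10, 5) = 252, so the call above prints 252.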
| 119 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
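

# Minimal usage sketch: `column_mapping` tells `datasets` which dataset columns feed
# each task field, e.g.
#   QuestionAnsweringExtractive().column_mapping
#   -> {"question": "question", "context": "context", "answers": "answers"}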
| 317 | 0 |
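# Derivation behind the loops below: with x = a + d, y = a, z = a - d (a = first_term),
# x**2 - y**2 - z**2 = a * (4d - a) = n, so d = (a + n / a) / 4 must be a positive
# integer, which is exactly the divisibility-by-4 test in the inner loop.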
def solution(limit: int = 1000000) -> int:
    '''Counts the n below limit with exactly ten qualifying solutions (Project Euler style).'''
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(F'''{solution() = }''')
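# Worked arithmetic behind the divisor sweep above (illustrative): with
# x = y + d and z = y - d in arithmetic progression,
# x**2 - y**2 - z**2 = 4*y*d - y**2 = y * (4*d - y),
# so every n = y * m with m = 4*d - y maps to first_term y and
# common_difference d = (y + m) / 4.
if __name__ == "__main__":
    y, d = 7, 4  # n = y * (4 * d - y) = 63
    assert (y + d) ** 2 - y**2 - (y - d) ** 2 == y * (4 * d - y) == 63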
| 285 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
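# Minimal usage sketch (illustrative): attribute access on the lazy module is
# what finally triggers the heavy torch/tf/flax imports deferred above, e.g.
#
#     from transformers import Wav2Vec2Config
#     config = Wav2Vec2Config()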
| 317 | 0 |
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list) -> list:
    """Bidirectional bubble sort: sweep right-to-left, then left-to-right, until no swap occurs."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 190 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader ):
    '''simple docstring'''
    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , field : Optional[str] = None , num_proc : Optional[int] = None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def read(self):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    '''simple docstring'''
    def __init__( self , dataset : Dataset , path_or_buf : Union[PathLike, BinaryIO] , batch_size : Optional[int] = None , num_proc : Optional[int] = None , **to_json_kwargs , ) -> None:
        """simple docstring"""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''')
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = """utf-8"""
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        """simple docstring"""
        _ = self.to_json_kwargs.pop("""path_or_buf""" , None)
        orient = self.to_json_kwargs.pop("""orient""" , """records""")
        lines = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False)
        index = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True)
        compression = self.to_json_kwargs.pop("""compression""" , None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''')
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf , """wb""" , compression=compression) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    """ was passed. Please provide a local path instead.""")
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs)
        return written
    def _batch_json(self , args) -> bytes:
        """simple docstring"""
        offset , orient , lines , index , to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs)
        if not json_str.endswith("""\n"""):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write( self , file_obj : BinaryIO , orient , lines , index , **to_json_kwargs , ) -> int:
        """simple docstring"""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset) , self.batch_size) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows , batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                    written += file_obj.write(json_str)
        return written
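# Minimal usage sketch for the reader/writer pair above (illustrative; the file
# names are hypothetical):
#
#     ds = JsonDatasetReader("train.jsonl").read()
#     JsonDatasetWriter(ds, "roundtrip.jsonl", num_proc=2).write()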
| 317 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Round height/width up so each is a multiple of scale_factor**2, then divide once by scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
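# Worked example of the rounding above (illustrative): each side is rounded up to
# a multiple of scale_factor**2 and then divided once by scale_factor, so with
# the default scale_factor=8:
#     downscale_height_and_width(768, 768) -> (96, 96)   # 768 is a multiple of 64
#     downscale_height_and_width(500, 700) -> (64, 88)   # both sides rounded up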
def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a (1, 3, h, w) float tensor in [-1, 1]."""
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('RGB' ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
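# Value-mapping check for prepare_image (illustrative): under x / 127.5 - 1, a
# pixel value of 0 maps to -1.0 and 255 maps to 1.0, producing the (1, 3, h, w)
# tensor range the MoVQ encoder in this pipeline expects.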
class KandinskyV22Img2ImgPipeline(DiffusionPipeline ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps(self , num_inference_steps , strength , device ):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ):
        """simple docstring"""
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}" )
        image = image.to(device=device , dtype=dtype )
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator , list ) and len(generator ) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator )}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
            elif isinstance(generator , list ):
                init_latents = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
                ]
                init_latents = torch.cat(init_latents , dim=0 )
            else:
                init_latents = self.movq.encode(image ).latent_dist.sample(generator )
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents] , dim=0 )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        device = torch.device(f"cuda:{gpu_id}" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload(self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        device = torch.device(f"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self ):
        """simple docstring"""
        if not hasattr(self.unet , '_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '_hf_hook' )
                and hasattr(module._hf_hook , 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , image , negative_image_embeds , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , strength = 0.3 , num_images_per_prompt = 1 , generator = None , output_type = "pil" , return_dict = True , ):
        """simple docstring"""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        if not isinstance(image , list ):
            image = [image]
        if not all(isinstance(i , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                f"Input is in incorrect format: {[type(i ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
        image = torch.cat([prepare_image(i , width , height ) for i in image] , dim=0 )
        image = image.to(dtype=image_embeds.dtype , device=device )
        latents = self.movq.encode(image )["""latents"""]
        latents = latents.repeat_interleave(num_images_per_prompt , dim=0 )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , strength , device )
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
        latents = self.prepare_latents(
            latents , latent_timestep , batch_size , num_images_per_prompt , image_embeds.dtype , device , generator )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""image_embeds""": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 253 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module ):
    '''simple docstring'''
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False) -> None:
        """simple docstring"""
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed , n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self , hidden , weight , bias , proj) -> torch.Tensor:
        """simple docstring"""
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous())
            logit = nn.functional.linear(proj_hid , weight , bias=bias)
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit
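    # Note on the projection path above: when `proj` is given, the logit is
    # computed as (hidden @ proj) @ weight.T + bias, i.e. the d_proj-dimensional
    # hidden state is first mapped into the cluster's smaller embedding space
    # before being scored against that cluster's output matrix.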
    def forward(self , hidden , labels=None , keep_order=False) -> torch.Tensor:
        """simple docstring"""
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("""Input and labels should have the same size in the batch dimension.""")
        else:
            hidden = hidden.view(-1 , hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1)
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj)
            head_logprob = nn.functional.log_softmax(head_logit , dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i)
                    hidden_i = hidden.index_select(0 , indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , """keep_order""") and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self , hidden) -> torch.Tensor:
        """simple docstring"""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
            return nn.functional.log_softmax(logit , dim=-1)
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit , dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i
            return out
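# Sketch of the bucket layout built above (illustrative numbers): with
# cutoffs=[2000, 10000] and n_token=30000, tokens [0, 2000) are scored by the
# head together with n_clusters=2 extra cluster logits, while [2000, 10000) and
# [10000, 30000) use smaller tail projections of size d_embed // div_val**i --
# frequent tokens stay cheap and rare tokens share capacity.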
| 317 | 0 |
def solution(max_perimeter: int = 10**9) -> int:
    '''Sums every qualifying perimeter up to max_perimeter, generated by the recurrence below.'''
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
| 232 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin ):
    '''simple docstring'''
    attributes = ["""image_processor""", """feature_extractor"""]
    image_processor_class = """TvltImageProcessor"""
    feature_extractor_class = """TvltFeatureExtractor"""
    def __init__( self , image_processor , feature_extractor) -> None:
        """simple docstring"""
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ) -> Any:
        """simple docstring"""
        if images is None and audio is None:
            raise ValueError("""You need to specify either an `images` or `audio` input to process.""")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
    @property
    def model_input_names(self) -> Any:
        """simple docstring"""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
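# Minimal usage sketch (illustrative; `video_frames` and `waveform` are
# hypothetical inputs, and both sub-processors must be instantiated first):
#
#     processor = TvltProcessor(image_processor, feature_extractor)
#     batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)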
| 317 | 0 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    '''Returns the largest prime factor of n by trial division, dividing out each factor fully.'''
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 237 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ) -> None:
        """simple docstring"""
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self) -> dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None
    def setUp(self) -> None:
        """simple docstring"""
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)
    @property
    def image_processor_dict(self) -> dict:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , """do_resize"""))
        self.assertTrue(hasattr(image_processing , """size"""))
        self.assertTrue(hasattr(image_processing , """do_center_crop"""))
        self.assertTrue(hasattr(image_processing , """crop_size"""))
    def test_image_processor_from_dict_with_kwargs(self) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"""shortest_edge""": 20})
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42})
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84})
    def test_batch_feature(self) -> None:
        """simple docstring"""
        pass
    def test_call_pil(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def test_call_numpy(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
    def test_call_pytorch(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
| 317 | 0 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ) -> Any:
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""next_sentence_label"""] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
        return inputs_dict
    class TFMobileBertModelTester:
        def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , embedding_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
            """simple docstring"""
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self) -> Any:
            """simple docstring"""
            input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length] )
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
                token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
                choice_labels = ids_tensor([self.batch_size] , self.num_choices )
            config = MobileBertConfig(
                vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
            """simple docstring"""
            model = TFMobileBertModel(config=config )
            inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
            result = model(inputs )
            inputs = [input_ids, input_mask]
            result = model(inputs )
            result = model(input_ids )
            self.parent.assertEqual(
                result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
            self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

        def create_and_check_mobilebert_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
            """simple docstring"""
            model = TFMobileBertForMaskedLM(config=config )
            inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

        def create_and_check_mobilebert_for_next_sequence_prediction(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
            """simple docstring"""
            model = TFMobileBertForNextSentencePrediction(config=config )
            inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

        def create_and_check_mobilebert_for_pretraining(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
            """simple docstring"""
            model = TFMobileBertForPreTraining(config=config )
            inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(
                result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
            self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

        def create_and_check_mobilebert_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
            """simple docstring"""
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config )
            inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

        def create_and_check_mobilebert_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
            """simple docstring"""
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config )
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
            inputs = {
                """input_ids""": multiple_choice_inputs_ids,
                """attention_mask""": multiple_choice_input_mask,
                """token_type_ids""": multiple_choice_token_type_ids,
            }
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

        def create_and_check_mobilebert_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
            """simple docstring"""
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config )
            inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

        def create_and_check_mobilebert_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
            """simple docstring"""
            model = TFMobileBertForQuestionAnswering(config=config )
            inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
            self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        def prepare_config_and_inputs_for_common(self) -> Any:
            """simple docstring"""
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
            return config, inputs_dict

    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> str:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
UpperCamelCase_ : List[Any] = TFMobileBertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_masked_lm(self) -> None:
        """simple docstring"""
        model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 175 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """xlm-roberta"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
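# Quick sanity sketch (assumes this module is importable via the installed
# `transformers` package): the default config mirrors RoBERTa-base, and the
# ONNX spec exposes batch/sequence as the dynamic axes.
if __name__ == "__main__":
    config = XLMRobertaConfig()
    print(config.hidden_size, config.num_hidden_layers)  # 768 12
    print(XLMRobertaOnnxConfig(config, task="default").inputs)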
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # TRANSFORMERS_OFFLINE only takes effect in a fresh interpreter, so every
        # scenario below is exercised through a python one-liner in a subprocess
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
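# End-user sketch of this builder (assumption: a local ./data.csv exists);
# `load_dataset("csv", ...)` routes to the Csv builder above and forwards any
# CsvConfig field as a keyword argument.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("csv", data_files="data.csv", sep=",")
    print(dataset["train"][0])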
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight"
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
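# Shape sketch for the layer above (illustrative sizes; run via the installed
# package so the relative import of `shape_list` resolves): with vocab_size=10
# and cutoffs=[4], the 4 most frequent tokens sit in the head together with one
# cluster logit, and the remaining 6 tokens form a single tail cluster.
if __name__ == "__main__":
    layer = TFAdaptiveSoftmaxMask(vocab_size=10, d_embed=8, d_proj=8, cutoffs=[4], div_val=1)
    hidden = tf.zeros((6, 2, 8))  # (seq_len, batch, d_proj)
    target = tf.zeros((6, 2), dtype=tf.int64)
    logprobs = layer(hidden, target, return_mean=True)
    print(logprobs.shape)  # (6, 2, 10)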
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
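# Worked example (doctest-style; values checked by hand). Sorting the rotations
# of "banana" and keeping the last column groups equal characters together:
# >>> bwt_transform("banana")
# {'bwt_string': 'nnbaaa', 'idx_original_string': 3}
# >>> reverse_bwt("nnbaaa", 3)
# 'banana'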
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are
        # decoded. Must be tested with an actual model -- the dummy models' tokenizers are not
        # aligned with their models, and `skip_special_tokens=True` has no effect on them.
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
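# Minimal end-user sketch of the API under test (the prompt and generation
# settings are arbitrary; requires network access for the checkpoint):
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

    tok = AutoTokenizer.from_pretrained("distilgpt2")
    lm = AutoModelForCausalLM.from_pretrained("distilgpt2")
    streamer = TextStreamer(tok, skip_prompt=True)
    inputs = tok("Streaming lets you", return_tensors="pt")
    lm.generate(**inputs, max_new_tokens=20, streamer=streamer)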
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 1 , lowerCAmelCase : Optional[str] = "relu" , ) -> List[str]:
"""simple docstring"""
super().__init__()
_snake_case : int = nn.Convad(
lowerCAmelCase , lowerCAmelCase , kernel_size=lowerCAmelCase , stride=lowerCAmelCase , padding=kernel_size // 2 , groups=lowerCAmelCase , bias=lowerCAmelCase , )
_snake_case : List[Any] = nn.BatchNormad(lowerCAmelCase)
_snake_case : Tuple = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
_snake_case : Tuple = self.convolution(lowerCAmelCase)
_snake_case : Any = self.normalization(lowerCAmelCase)
_snake_case : List[Any] = self.activation(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : RegNetConfig) -> List[str]:
"""simple docstring"""
super().__init__()
_snake_case : Dict = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act)
_snake_case : Dict = config.num_channels
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
_snake_case : str = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""")
_snake_case : Any = self.embedder(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 2) -> Optional[Any]:
"""simple docstring"""
super().__init__()
_snake_case : Optional[Any] = nn.Convad(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , stride=lowerCAmelCase , bias=lowerCAmelCase)
_snake_case : Tuple = nn.BatchNormad(lowerCAmelCase)
def UpperCamelCase_ ( self : int , lowerCAmelCase : Tensor) -> Tensor:
"""simple docstring"""
_snake_case : Optional[Any] = self.convolution(lowerCAmelCase)
_snake_case : Optional[int] = self.normalization(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : int , lowerCAmelCase : int) -> Any:
"""simple docstring"""
super().__init__()
_snake_case : Optional[Any] = nn.AdaptiveAvgPoolad((1, 1))
_snake_case : Optional[Any] = nn.Sequential(
nn.Convad(lowerCAmelCase , lowerCAmelCase , kernel_size=1) , nn.ReLU() , nn.Convad(lowerCAmelCase , lowerCAmelCase , kernel_size=1) , nn.Sigmoid() , )
def UpperCamelCase_ ( self : Any , lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
_snake_case : Dict = self.pooler(lowerCAmelCase)
_snake_case : List[str] = self.attention(lowerCAmelCase)
_snake_case : str = hidden_state * attention
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 1) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
_snake_case : Optional[int] = in_channels != out_channels or stride != 1
_snake_case : Optional[Any] = max(1 , out_channels // config.groups_width)
_snake_case : Union[str, Any] = (
RegNetShortCut(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase) if should_apply_shortcut else nn.Identity()
)
_snake_case : Tuple = nn.Sequential(
RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , groups=lowerCAmelCase , activation=config.hidden_act) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=lowerCAmelCase) , )
_snake_case : Dict = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Union[str, Any] = hidden_state
_snake_case : int = self.layer(lowerCAmelCase)
_snake_case : Dict = self.shortcut(lowerCAmelCase)
hidden_state += residual
_snake_case : str = self.activation(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 1) -> Optional[Any]:
"""simple docstring"""
super().__init__()
_snake_case : int = in_channels != out_channels or stride != 1
_snake_case : Dict = max(1 , out_channels // config.groups_width)
_snake_case : Tuple = (
RegNetShortCut(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase) if should_apply_shortcut else nn.Identity()
)
_snake_case : Dict = nn.Sequential(
RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , groups=lowerCAmelCase , activation=config.hidden_act) , RegNetSELayer(lowerCAmelCase , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(lowerCAmelCase , lowerCAmelCase , kernel_size=1 , activation=lowerCAmelCase) , )
_snake_case : Optional[Any] = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Optional[int] , lowerCAmelCase : List[Any]) -> Tuple:
"""simple docstring"""
_snake_case : Tuple = hidden_state
_snake_case : List[Any] = self.layer(lowerCAmelCase)
_snake_case : List[str] = self.shortcut(lowerCAmelCase)
hidden_state += residual
_snake_case : int = self.activation(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 2 , ) -> int:
"""simple docstring"""
super().__init__()
_snake_case : Optional[Any] = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
_snake_case : Optional[int] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , ) , *[layer(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase) for _ in range(depth - 1)] , )
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
_snake_case : List[str] = self.layers(lowerCAmelCase)
return hidden_state
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : RegNetConfig) -> List[str]:
"""simple docstring"""
super().__init__()
_snake_case : Dict = nn.ModuleList([])
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
_snake_case : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(lowerCAmelCase , config.depths[1:]):
self.stages.append(RegNetStage(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , depth=lowerCAmelCase))
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : Tensor , lowerCAmelCase : bool = False , lowerCAmelCase : bool = True) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
_snake_case : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_snake_case : Optional[int] = hidden_states + (hidden_state,)
_snake_case : Dict = stage_module(lowerCAmelCase)
if output_hidden_states:
_snake_case : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase , hidden_states=lowerCAmelCase)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = RegNetConfig
snake_case_ : List[Any] = """regnet"""
snake_case_ : Any = """pixel_values"""
snake_case_ : Optional[Any] = True
def UpperCamelCase_ ( self : List[Any] , lowerCAmelCase : List[str]) -> List[Any]:
"""simple docstring"""
if isinstance(lowerCAmelCase , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""")
elif isinstance(lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=False) -> Optional[int]:
"""simple docstring"""
if isinstance(lowerCAmelCase , lowerCAmelCase):
_snake_case : Optional[Any] = value
REGNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
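# End-user sketch of the classification head above (illustrative; downloads the
# checkpoint and the standard COCO example image used in the HF docs):
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor, RegNetForImageClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    logits = model(**processor(image, return_tensors="pt")).logits
    print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"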
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
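# Round-trip sketch using the public API backed by the classes above
# (assumption: /tmp is writable):
if __name__ == "__main__":
    from datasets import Dataset, load_dataset

    ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
    ds.to_json("/tmp/demo.jsonl")  # JsonDatasetWriter, one JSON object per line
    reloaded = load_dataset("json", data_files="/tmp/demo.jsonl")
    print(reloaded["train"][0])  # {'text': 'hello', 'label': 0}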
def prefix_function(input_string: str) -> list:
    """
    For each prefix of the string, computes the length of its longest proper
    prefix that is also a suffix (the Knuth-Morris-Pratt failure function).

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """
    Returns the maximum of the prefix function, i.e. the length of the longest
    prefix that reappears elsewhere in the string as a suffix of some prefix.

    >>> longest_prefix("aabcdaabc")
    4
    """
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
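# A common application sketch (illustrative): running the prefix function over
# "pattern + separator + text" finds all pattern occurrences in O(n + m).
if __name__ == "__main__":
    pattern, text = "ab", "cabxabab"
    pi = prefix_function(pattern + "\x00" + text)
    matches = [i - 2 * len(pattern) for i in range(len(pattern) + 1, len(pi)) if pi[i] == len(pattern)]
    print(matches)  # [1, 4, 6] -- start indices of "ab" in text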
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(self, num_channels=3, image_size=600, width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8, kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192], out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1], num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean", initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99, dropout_rate=0.5, drop_connect_rate=0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
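# Quick check (run via the installed package): the defaults above correspond to
# EfficientNet-B7, so the derived layer count is sum([1, 2, 2, 3, 3, 4, 1]) * 4 = 64.
if __name__ == "__main__":
    config = EfficientNetConfig()
    print(config.num_hidden_layers)  # 64
    print(EfficientNetOnnxConfig(config).atol_for_validation)  # 1e-05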
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
a__ = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
a__ = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak the fairseq model's weights to our BART structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
a__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
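# Example invocation (hypothetical paths and checkpoint names, for illustration only):
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart.large.cnn \
#       ./bart-large-cnn-hf --hf_config facebook/bart-large-cnn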
| 317 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_UpperCAmelCase : str = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config', description=_description)
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command', description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments', 'Arguments that can be configured through `accelerate config`.')
    config_args.add_argument(
        '--config_file', type=str, default=None, help='Path to the config file to use for accelerate.', )
    config_args.add_argument(
        '--tpu_name', default=None, help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.', )
    config_args.add_argument(
        '--tpu_zone', default=None, help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.', )
    pod_args = parser.add_argument_group('TPU Arguments', 'Arguments for options ran inside the TPU.')
    pod_args.add_argument(
        '--use_alpha', action='store_true', help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.', )
    pod_args.add_argument(
        '--command_file', default=None, help='The path to the file containing the commands to run on the pod on startup.', )
    pod_args.add_argument(
        '--command', action='append', nargs='+', help='A command to run on the pod. Can be passed multiple times.', )
    pod_args.add_argument(
        '--install_accelerate', action='store_true', help='Whether to install accelerate on the pod. Defaults to False.', )
    pod_args.add_argument(
        '--accelerate_version', default='latest', help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.', )
    pod_args.add_argument(
        '--debug', action='store_true', help='If set, will print the command that would be run instead of running it.')
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.')
    if args.command_file:
        with open(args.command_file, 'r') as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print('Successfully setup pod.')
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
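# Example invocation (TPU name and zone are placeholders):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --install_accelerate --command "accelerate launch train.py" --debug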
| 285 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
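# Migration sketch (the checkpoint name below is illustrative, not from this file):
#   from transformers import SegformerImageProcessor
#   image_processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")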
| 317 | 0 |
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError('longest_common_substring() takes two strings for inputs')
    text1_length = len(text1)
    text2_length = len(text2)
    # dp[i][j] is the length of the longest common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
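# Illustrative check (added for clarity, not from the original file): the longest
# run shared by "abcdef" and "zcdemf" is "cde".
#   assert longest_common_substring("abcdef", "zcdemf") == "cde"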
if __name__ == "__main__":
import doctest
doctest.testmod()
| 190 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 317 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262])
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]
        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding='max_length', max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors='pt', )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217])
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE_ : Dict = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained('Intel/ldm3d')
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706])
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE_ : int = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained('Intel/ldm3d').to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained('Intel/ldm3d-4c').to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 253 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 317 | 0 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
    {
        'type': 'header',
        'text': {
            'type': 'plain_text',
            'text': f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            'emoji': True,
        },
    }
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f"{line['duration']:.4f}"
                if line.get('outcome', '') == "failed":
                    section_num_failed += 1
                    failed.append([test, duration, log.name.split('_')[0]])
                    total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=['Test Location', 'Num Failed'],
                tablefmt=hf_table_format,
                stralign='right',
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
print(f"### {message}")
else:
    message = 'No failed tests! 🤗'
print(f"## {message}")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
    if message != "No failed tests! 🤗":
        md_report = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': message,
            },
        }
        payload.append(md_report)
        action_button = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': '*For more details:*',
            },
            'accessory': {
                'type': 'button',
                'text': {
                    'type': 'plain_text',
                    'text': 'Check Action results',
                    'emoji': True,
                },
                'url': f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        'type': 'context',
        'elements': [
            {
                'type': 'plain_text',
                'text': f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
                'type': 'section',
                'text': {
                    'type': 'mrkdwn',
                    'text': f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel='#accelerate-ci-daily',
                thread_ts=ts,
                blocks=[payload],
            )
| 232 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k
def _set(k, v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)
_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]
_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]
_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    def is_public(name: str) -> bool:
        return not name.startswith("_")
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
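# Note (added for clarity): `>` on sets is a strict-superset check, so the assert
# above says dict offers every public name HashMap does, plus at least one more —
# i.e. HashMap never exposes a public method that plain dict lacks.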
| 317 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value
        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)}).")
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value
        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
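# Usage sketch (hypothetical feature spec, relying on the public `datasets` API):
#   from datasets import Features
#   features = Features({"translation": Translation(languages=["en", "fr"])})
#   example = {"translation": {"en": "the cat", "fr": "le chat"}}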
| 237 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        run = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        run = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
        run = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""
        run = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode", result.stderr.decode().replace("\n", ""), )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""
        run = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 317 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection')
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs, [
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                }
                for i in range(n)
            ], )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection')
        outputs = object_detector(
            './tests/fixtures/tests_samples/COCO/000000039769.png', candidate_labels=['cat', 'remote', 'couch'], threshold=0.64, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
] , )
        outputs = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.7235, 'label': 'cat', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7218, 'label': 'remote', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.7184, 'label': 'couch', 'box': {'xmin': 2_0_4, 'ymin': 1_6_7, 'xmax': 2_3_2, 'ymax': 1_9_0}},
{'score': 0.6748, 'label': 'remote', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6656, 'label': 'cat', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6614, 'label': 'couch', 'box': {'xmin': 5_7_1, 'ymin': 8_3, 'xmax': 5_9_8, 'ymax': 1_0_3}},
{'score': 0.6456, 'label': 'remote', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
{'score': 0.642, 'label': 'remote', 'box': {'xmin': 6_7, 'ymin': 2_7_4, 'xmax': 9_3, 'ymax': 2_9_7}},
{'score': 0.6419, 'label': 'cat', 'box': {'xmin': 4_9_4, 'ymin': 1_0_5, 'xmax': 5_2_1, 'ymax': 1_2_7}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
] , )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
],
[
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
{'score': 0.1474, 'label': 'remote', 'box': {'xmin': 3_3_5, 'ymin': 7_4, 'xmax': 3_7_1, 'ymax': 1_8_7}},
{'score': 0.1208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_4_2, 'ymax': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], threshold=threshold, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
{'score': 0.2537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 5_5, 'xmax': 3_1_5, 'ymax': 4_7_2}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], top_k=top_k, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.2868, 'label': 'cat', 'box': {'xmin': 3_2_4, 'ymin': 2_0, 'xmax': 6_4_0, 'ymax': 3_7_3}},
{'score': 0.277, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_2, 'xmax': 1_7_7, 'ymax': 1_1_5}},
] , )
| 175 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
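# Usage sketch outside pytest (requires network access; output shown is illustrative):
#   from datasets import get_dataset_split_names
#   get_dataset_split_names("squad", config_name="plain_text")  # -> ["train", "validation"]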
| 317 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
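# Example response shape (field names per the zenquotes.io API; shown for illustration):
# [{"q": "<quote text>", "a": "<author name>", "h": "<HTML-formatted quote>"}]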
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
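# Note on the dict above: it deliberately reuses the encoder attention_mask as
# "decoder_attention_mask" (mirroring the upstream test helper); the computed
# decoder mask and head masks are built but not returned.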
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48)
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2],
                 num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
                 use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32,
                 out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
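# Minimal usage sketch (hypothetical values matching the defaults above):
#
#   config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
#   assert config.hidden_size == 96 * 2 ** 3  # channel dim after the last stage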
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
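# Note: fairseq names feature-extractor parameters like "conv_layers.0.2.weight";
# load_conv_layer above parses the first index as the conv block (layer_id) and the
# second as the sub-module within it (type_id: 0 = conv, 2 = layer norm).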
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A_ : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
A_ : Optional[Any] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
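# Example invocation (hypothetical paths, shown only to illustrate the CLI above):
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned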
from ..utils import DummyObject, requires_backends


# The original file defines one dummy placeholder per torch-backed class so that a
# helpful error is raised when torch is not installed. The individual class names
# were lost during obfuscation, so a single representative definition (with a
# hypothetical name) stands in for the repeated pattern.
class TorchDummyObject(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# Module-level dummy functions follow the same pattern; their original names were
# also lost, so one representative (hypothetically named) definition is kept.
def torch_dummy_function(*args, **kwargs):
    requires_backends(torch_dummy_function, ["torch"])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Any , **lowerCAmelCase : Any) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Tuple) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Dict) -> Dict:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : str , **lowerCAmelCase : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Dict , **lowerCAmelCase : int) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : Optional[int] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Tuple) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[str] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : Dict) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : int) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Tuple = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : int , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Dict , **lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Any) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Dict) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : List[str] , **lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[Any]) -> Dict:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Any , **lowerCAmelCase : int) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[str] = ["""torch"""]
def __init__( self : Optional[int] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> List[str]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : List[str]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : str , **lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : int , **lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Any = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : str) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : Tuple) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Tuple = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : Dict , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : int) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Optional[int]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Any , **lowerCAmelCase : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Dict , **lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : List[str]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : List[str] , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Dict = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[str]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Tuple) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Any) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : List[str] , **lowerCAmelCase : int) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Any , **lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : int , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Any = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : int , **lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Any = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : List[str]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Any , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : str) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Any , **lowerCAmelCase : Any) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Union[str, Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple) -> str:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Union[str, Any]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : Dict , **lowerCAmelCase : Union[str, Any]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Any , **lowerCAmelCase : List[Any]) -> str:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : List[str] , **lowerCAmelCase : List[Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Dict) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : List[str] , **lowerCAmelCase : int) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Tuple) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Optional[Any]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[str] = ["""torch"""]
def __init__( self : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[str]) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str]) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[int] = ["""torch"""]
def __init__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : Optional[Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : int) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[Any] = ["""torch"""]
def __init__( self : int , *lowerCAmelCase : Any , **lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Tuple) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : str , **lowerCAmelCase : Dict) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int]) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[int]) -> int:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[str]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Dict = ["""torch"""]
def __init__( self : Union[str, Any] , *lowerCAmelCase : int , **lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : List[str] , **lowerCAmelCase : List[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Optional[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Dict) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Any = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : Tuple) -> Dict:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : str = ["""torch"""]
def __init__( self : Any , *lowerCAmelCase : List[Any] , **lowerCAmelCase : List[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Any , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : str , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Union[str, Any]) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Dict , **lowerCAmelCase : Dict) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Optional[Any] = ["""torch"""]
def __init__( self : List[Any] , *lowerCAmelCase : str , **lowerCAmelCase : Any) -> Any:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str]) -> int:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[str]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : int = ["""torch"""]
def __init__( self : Optional[int] , *lowerCAmelCase : Dict , **lowerCAmelCase : Dict) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , *lowerCAmelCase : int , **lowerCAmelCase : List[str]) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : Any , **lowerCAmelCase : Dict) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Dict = ["""torch"""]
def __init__( self : Optional[int] , *lowerCAmelCase : int , **lowerCAmelCase : List[str]) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Tuple , *lowerCAmelCase : Dict , **lowerCAmelCase : Tuple) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
class snake_case ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : Union[str, Any] = ["""torch"""]
def __init__( self : List[str] , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : Dict , *lowerCAmelCase : str , **lowerCAmelCase : List[Any]) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
@classmethod
def UpperCamelCase_ ( cls : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Tuple) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""torch"""])
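# The repetitive classes above follow the `transformers` dummy-object pattern:
# when PyTorch is absent, importing a model class yields one of these
# placeholders, and any use fails with an actionable error rather than a bare
# ImportError at import time. A minimal, self-contained sketch of the idea
# (hypothetical helper, not the library's exact implementation):
import importlib.util


def backend_is_available(backend: str) -> bool:
    # Simplified probe; the real library caches richer per-backend checks.
    return importlib.util.find_spec(backend) is not None


def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    missing = [b for b in backends if not backend_is_available(b)]
    if missing:
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")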
| 317 | 0 |
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Sort a list by repeatedly moving its minimum and maximum to the ends."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
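# A quick trace of the min/max extraction above (example values are mine):
#   [5, 1, 4, 2, 3] -> round 1: start=[1],    end=[5],    remaining=[4, 2, 3]
#                   -> round 2: start=[1, 2], end=[5, 4], remaining=[3]
#   end.reverse() gives [4, 5]; result is [1, 2] + [3] + [4, 5].
assert merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]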
| 40 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig):
'''simple docstring'''
    model_type = "efficientnet"
def __init__( self : List[Any] , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 600 , lowerCAmelCase : float = 2.0 , lowerCAmelCase : float = 3.1 , lowerCAmelCase : int = 8 , lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , lowerCAmelCase : List[int] = [] , lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCAmelCase : float = 0.25 , lowerCAmelCase : str = "swish" , lowerCAmelCase : int = 2560 , lowerCAmelCase : str = "mean" , lowerCAmelCase : float = 0.02 , lowerCAmelCase : float = 0.001 , lowerCAmelCase : float = 0.99 , lowerCAmelCase : float = 0.5 , lowerCAmelCase : float = 0.2 , **lowerCAmelCase : Tuple , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
_snake_case : Optional[int] = num_channels
_snake_case : str = image_size
_snake_case : Tuple = width_coefficient
_snake_case : List[str] = depth_coefficient
_snake_case : List[Any] = depth_divisor
_snake_case : str = kernel_sizes
_snake_case : Any = in_channels
_snake_case : Optional[Any] = out_channels
_snake_case : str = depthwise_padding
_snake_case : Tuple = strides
_snake_case : Dict = num_block_repeats
_snake_case : int = expand_ratios
_snake_case : Tuple = squeeze_expansion_ratio
_snake_case : Optional[int] = hidden_act
_snake_case : Optional[int] = hidden_dim
_snake_case : Tuple = pooling_type
_snake_case : Tuple = initializer_range
_snake_case : List[Any] = batch_norm_eps
_snake_case : Optional[Any] = batch_norm_momentum
_snake_case : str = dropout_rate
_snake_case : Union[str, Any] = drop_connect_rate
_snake_case : Optional[int] = sum(lowerCAmelCase) * 4
class EfficientNetOnnxConfig(OnnxConfig):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
@property
def UpperCamelCase_ ( self : Optional[Any]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def UpperCamelCase_ ( self : Union[str, Any]) -> float:
"""simple docstring"""
return 1E-5
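# A short usage sketch, assuming the standard `transformers` config API
# (the class above is EfficientNetConfig; its defaults mirror EfficientNet-B7):
from transformers import EfficientNetConfig, EfficientNetModel

config = EfficientNetConfig(image_size=224, hidden_dim=1280)
model = EfficientNetModel(config)  # randomly initialized, not pretrained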
| 317 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> int:
return FSMTTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCamelCase : str = FSMTForConditionalGeneration.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]:
UpperCamelCase : Any = F"""facebook/wmt19-{pair}"""
UpperCamelCase : List[str] = self.get_tokenizer(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = self.get_model(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = bleu_data[pair]["""src"""]
UpperCamelCase : Optional[int] = bleu_data[pair]["""tgt"""]
UpperCamelCase : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE_, return_tensors='pt', truncation=SCREAMING_SNAKE_CASE_, padding='longest' ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = model.generate(
input_ids=batch.input_ids, num_beams=8, )
UpperCamelCase : Any = tokenizer.batch_decode(
SCREAMING_SNAKE_CASE_, skip_special_tokens=SCREAMING_SNAKE_CASE_, clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = calculate_bleu(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(scores['bleu'], SCREAMING_SNAKE_CASE_ )
| 119 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
'''simple docstring'''
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"
@property
def UpperCamelCase_ ( self : Any) -> Dict[str, str]:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
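# The template maps arbitrary dataset columns onto the fixed schema above.
# A hedged sketch using the (since-deprecated) `prepare_for_task` helper;
# it only works for datasets whose script registers this template:
from datasets import load_dataset

squad = load_dataset("squad", split="train[:100]")
prepared = squad.prepare_for_task("question-answering-extractive")
# -> columns renamed/cast to "question", "context", "answers"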
| 317 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='tf').input_ids
        labels = tokenizer('Hi I am', return_tensors='tf').input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.22_81_68
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 285 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
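# The _LazyModule indirection defers each heavy submodule import until an
# attribute is first touched, so `import transformers` stays cheap. A sketch
# of the observable behavior (assuming the standard lazy-module mechanics):
import transformers

config = transformers.Wav2Vec2Config()   # triggers the configuration_wav2vec2 import
model_cls = transformers.Wav2Vec2ForCTC  # triggers modeling_wav2vec2 (needs torch)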
| 317 | 0 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.')
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return tf.__version__
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.')
__A : int = self._prepare_inference_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return self._measure_speed(_inference)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.')
__A : Any = self._prepare_train_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return self._measure_speed(_train)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _UpperCAmelCase)
__A : Dict = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.')
__A : Optional[int] = self._prepare_inference_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return self._measure_memory(_inference)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _UpperCAmelCase)
__A : int = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.')
__A : str = self._prepare_train_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
return self._measure_memory(_train)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.')
__A : Union[str, Any] = (
hasattr(_UpperCAmelCase , 'architectures')
and isinstance(config.architectures , _UpperCAmelCase)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__A : List[str] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
__A : Optional[int] = __import__('transformers' , fromlist=[model_class])
__A : List[str] = getattr(_UpperCAmelCase , _UpperCAmelCase)
__A : Any = model_cls(_UpperCAmelCase)
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
else:
__A : int = TF_MODEL_MAPPING[config.__class__](_UpperCAmelCase)
# encoder-decoder has vocab size saved differently
__A : Union[str, Any] = config.vocab_size if hasattr(_UpperCAmelCase , 'vocab_size') else config.encoder.vocab_size
__A : Tuple = random_input_ids(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_forward():
return model(_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase , training=_UpperCAmelCase)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_forward():
return model(_UpperCAmelCase , training=_UpperCAmelCase)
__A : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : List[str] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.')
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.')
__A : List[Any] = (
hasattr(_UpperCAmelCase , 'architectures')
and isinstance(config.architectures , _UpperCAmelCase)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__A : Union[str, Any] = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
__A : Optional[int] = __import__('transformers' , fromlist=[model_class])
__A : Any = getattr(_UpperCAmelCase , _UpperCAmelCase)
__A : Optional[int] = model_cls(_UpperCAmelCase)
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
else:
__A : Union[str, Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_UpperCAmelCase)
# encoder-decoder has vocab size saved differently
__A : Union[str, Any] = config.vocab_size if hasattr(_UpperCAmelCase , 'vocab_size') else config.encoder.vocab_size
__A : Dict = random_input_ids(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_train():
__A : Dict = model(_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase)[0]
__A : int = tf.gradients(_UpperCAmelCase , model.trainable_variables)
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_train():
__A : Optional[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase)[0]
__A : Dict = tf.gradients(_UpperCAmelCase , model.trainable_variables)
return gradients
__A : str = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation')
timeit.repeat(_UpperCAmelCase , repeat=1 , number=5)
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__A : Any = timeit.repeat(
_UpperCAmelCase , repeat=self.args.repeat , number=10 , )
return min(_UpperCAmelCase) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}')
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.')
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.')
__A : Union[str, Any] = start_memory_tracing('transformers')
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`')
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.')
__A : Any = """N/A"""
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.')
# init nvml
nvml.nvmlInit()
func()
__A : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
__A : Optional[Any] = nvml.nvmlDeviceGetMemoryInfo(_UpperCAmelCase)
__A : Tuple = meminfo.used
__A : str = Memory(_UpperCAmelCase)
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.')
__A : Tuple = None
else:
__A : Optional[int] = measure_peak_memory_cpu(_UpperCAmelCase)
__A : str = Memory(_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else memory_bytes
if self.args.trace_memory_line_by_line:
__A : Union[str, Any] = stop_memory_tracing(_UpperCAmelCase)
if memory is None:
__A : List[str] = summary.total
else:
__A : List[Any] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}')
return "N/A", None
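# A hedged usage sketch for the class above via the public entry points;
# `benchmark.run()` exercises exactly the _inference/_train closures built here:
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],
    batch_sizes=[8],
    sequence_lengths=[32, 128],
    inference=True,
    training=False,
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()  # speed in seconds, memory in MB, per (model, batch size, sequence length)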
| 190 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCAmelCase : NestedDataStructureLike[PathLike] , lowerCAmelCase : Optional[NamedSplit] = None , lowerCAmelCase : Optional[Features] = None , lowerCAmelCase : str = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : Optional[int] = None , **lowerCAmelCase : Optional[Any] , ) -> int:
"""simple docstring"""
super().__init__(
lowerCAmelCase , split=lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase , streaming=lowerCAmelCase , num_proc=lowerCAmelCase , **lowerCAmelCase , )
_snake_case : Tuple = field
_snake_case : str = path_or_paths if isinstance(lowerCAmelCase , lowerCAmelCase) else {self.split: path_or_paths}
_snake_case : int = Json(
cache_dir=lowerCAmelCase , data_files=lowerCAmelCase , features=lowerCAmelCase , field=lowerCAmelCase , **lowerCAmelCase , )
def UpperCamelCase_ ( self : Any) -> Tuple:
"""simple docstring"""
if self.streaming:
_snake_case : int = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
_snake_case : Dict = None
_snake_case : Optional[int] = None
_snake_case : Optional[Any] = None
_snake_case : str = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase , download_mode=lowerCAmelCase , verification_mode=lowerCAmelCase , base_path=lowerCAmelCase , num_proc=self.num_proc , )
_snake_case : List[str] = self.builder.as_dataset(
split=self.split , verification_mode=lowerCAmelCase , in_memory=self.keep_in_memory)
return dataset
class JsonDatasetWriter:
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : Dataset , lowerCAmelCase : Union[PathLike, BinaryIO] , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[int] = None , **lowerCAmelCase : Any , ) -> Optional[int]:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''')
_snake_case : Optional[Any] = dataset
_snake_case : str = path_or_buf
_snake_case : Optional[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_snake_case : Tuple = num_proc
_snake_case : Dict = """utf-8"""
_snake_case : str = to_json_kwargs
def UpperCamelCase_ ( self : Optional[Any]) -> int:
"""simple docstring"""
_snake_case : Optional[Any] = self.to_json_kwargs.pop("""path_or_buf""" , lowerCAmelCase)
_snake_case : Any = self.to_json_kwargs.pop("""orient""" , """records""")
_snake_case : List[str] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False)
_snake_case : List[Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True)
_snake_case : Union[str, Any] = self.to_json_kwargs.pop("""compression""" , lowerCAmelCase)
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''')
if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
with fsspec.open(self.path_or_buf , """wb""" , compression=lowerCAmelCase) as buffer:
_snake_case : List[str] = self._write(file_obj=lowerCAmelCase , orient=lowerCAmelCase , lines=lowerCAmelCase , index=lowerCAmelCase , **self.to_json_kwargs)
else:
if compression:
raise NotImplementedError(
F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
""" was passed. Please provide a local path instead.""")
_snake_case : Tuple = self._write(
file_obj=self.path_or_buf , orient=lowerCAmelCase , lines=lowerCAmelCase , index=lowerCAmelCase , **self.to_json_kwargs)
return written
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : int = args
_snake_case : int = query_table(
table=self.dataset.data , key=slice(lowerCAmelCase , offset + self.batch_size) , indices=self.dataset._indices , )
_snake_case : Optional[Any] = batch.to_pandas().to_json(
path_or_buf=lowerCAmelCase , orient=lowerCAmelCase , lines=lowerCAmelCase , index=lowerCAmelCase , **lowerCAmelCase)
if not json_str.endswith("""\n"""):
json_str += "\n"
return json_str.encode(self.encoding)
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : BinaryIO , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , **lowerCAmelCase : List[Any] , ) -> int:
"""simple docstring"""
_snake_case : Optional[int] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset) , self.batch_size) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
_snake_case : Tuple = self._batch_json((offset, orient, lines, index, to_json_kwargs))
written += file_obj.write(lowerCAmelCase)
else:
_snake_case , _snake_case : str = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowerCAmelCase , lowerCAmelCase)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(lowerCAmelCase)
return written
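# Both classes back the one-liners most users call; a sketch assuming the
# public `datasets` API:
from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"a": [1, 2, 3]})
ds.to_json("data.jsonl", lines=True, num_proc=2)  # JsonDatasetWriter path
reloaded = load_dataset("json", data_files="data.jsonl", split="train")  # JsonDatasetReader path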
| 317 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=["<s>NOTUSED", "</s>NOTUSED"] , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ : Any = vocab_file
SCREAMING_SNAKE_CASE_ : Optional[int] = False if not self.vocab_file else True
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Dict = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
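# A sketch of the special-token layout the methods above produce (the obfuscated
# names correspond to build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences; standard RoBERTa-style "<s> A </s></s> B </s>"):
from transformers import CamembertTokenizerFast

tok = CamembertTokenizerFast.from_pretrained("camembert-base")
single = tok.build_inputs_with_special_tokens([10, 11])  # [<s>, 10, 11, </s>]
pair = tok.build_inputs_with_special_tokens([10], [20])  # [<s>, 10, </s>, </s>, 20, </s>]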
| 253 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : int=1 , lowerCAmelCase : List[Any]=False) -> str:
"""simple docstring"""
super().__init__()
_snake_case : List[str] = n_token
_snake_case : Any = d_embed
_snake_case : List[str] = d_proj
_snake_case : Optional[int] = cutoffs + [n_token]
_snake_case : Dict = [0] + self.cutoffs
_snake_case : Optional[Any] = div_val
_snake_case : Tuple = self.cutoffs[0]
_snake_case : List[str] = len(self.cutoffs) - 1
_snake_case : str = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_snake_case : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
_snake_case : Any = nn.Parameter(torch.zeros(self.n_clusters))
_snake_case : Tuple = nn.ModuleList()
_snake_case : int = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase , lowerCAmelCase)))
else:
self.out_projs.append(lowerCAmelCase)
self.out_layers.append(nn.Linear(lowerCAmelCase , lowerCAmelCase))
else:
for i in range(len(self.cutoffs)):
_snake_case , _snake_case : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Dict = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase , lowerCAmelCase)))
self.out_layers.append(nn.Linear(lowerCAmelCase , r_idx - l_idx))
_snake_case : Tuple = keep_order
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int]) -> List[str]:
"""simple docstring"""
if proj is None:
_snake_case : List[Any] = nn.functional.linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_snake_case : List[str] = nn.functional.linear(lowerCAmelCase , proj.t().contiguous())
_snake_case : Optional[int] = nn.functional.linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : int=False) -> Tuple:
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
_snake_case : List[str] = hidden[..., :-1, :].contiguous()
_snake_case : int = labels[..., 1:].contiguous()
_snake_case : int = hidden.view(-1 , hidden.size(-1))
_snake_case : str = labels.view(-1)
if hidden.size(0) != labels.size(0):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""")
else:
_snake_case : List[Any] = hidden.view(-1 , hidden.size(-1))
if self.n_clusters == 0:
_snake_case : int = self._compute_logit(lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
if labels is not None:
_snake_case : Optional[int] = labels != -100
_snake_case : Union[str, Any] = torch.zeros_like(lowerCAmelCase , dtype=hidden.dtype , device=hidden.device)
_snake_case : Union[str, Any] = (
-nn.functional.log_softmax(lowerCAmelCase , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
)
else:
_snake_case : Optional[int] = nn.functional.log_softmax(lowerCAmelCase , dim=-1)
else:
# construct weights and biases
_snake_case , _snake_case : Optional[int] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
_snake_case , _snake_case : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Dict = self.out_layers[0].weight[l_idx:r_idx]
_snake_case : Tuple = self.out_layers[0].bias[l_idx:r_idx]
else:
_snake_case : Any = self.out_layers[i].weight
_snake_case : Optional[int] = self.out_layers[i].bias
if i == 0:
_snake_case : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0)
_snake_case : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(lowerCAmelCase)
biases.append(lowerCAmelCase)
_snake_case , _snake_case , _snake_case : List[Any] = weights[0], biases[0], self.out_projs[0]
_snake_case : List[str] = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : Dict = nn.functional.log_softmax(lowerCAmelCase , dim=1)
if labels is None:
_snake_case : List[Any] = hidden.new_empty((head_logit.size(0), self.n_token))
else:
_snake_case : Optional[Any] = torch.zeros_like(lowerCAmelCase , dtype=hidden.dtype , device=hidden.device)
_snake_case : Optional[int] = 0
_snake_case : Union[str, Any] = [0] + self.cutoffs
for i in range(len(lowerCAmelCase) - 1):
_snake_case , _snake_case : Any = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_snake_case : Optional[int] = (labels >= l_idx) & (labels < r_idx)
_snake_case : Dict = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_snake_case : Dict = labels.index_select(0 , lowerCAmelCase) - l_idx
_snake_case : List[Any] = head_logprob.index_select(0 , lowerCAmelCase)
_snake_case : Dict = hidden.index_select(0 , lowerCAmelCase)
else:
_snake_case : Optional[Any] = hidden
if i == 0:
if labels is not None:
_snake_case : str = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
else:
_snake_case : int = head_logprob[:, : self.cutoffs[0]]
else:
_snake_case , _snake_case , _snake_case : Dict = weights[i], biases[i], self.out_projs[i]
_snake_case : int = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : List[str] = nn.functional.log_softmax(lowerCAmelCase , dim=1)
_snake_case : str = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_snake_case : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None]).squeeze(1)
else:
_snake_case : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_snake_case : int = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""") and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase , -logprob_i)
else:
out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Optional[int]) -> Tuple:
"""simple docstring"""
if self.n_clusters == 0:
_snake_case : Optional[Any] = self._compute_logit(lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
return nn.functional.log_softmax(lowerCAmelCase , dim=-1)
else:
# construct weights and biases
_snake_case , _snake_case : Optional[int] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
_snake_case , _snake_case : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Optional[Any] = self.out_layers[0].weight[l_idx:r_idx]
_snake_case : Union[str, Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
_snake_case : Tuple = self.out_layers[i].weight
_snake_case : Any = self.out_layers[i].bias
if i == 0:
_snake_case : Tuple = torch.cat([weight_i, self.cluster_weight] , dim=0)
_snake_case : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(lowerCAmelCase)
biases.append(lowerCAmelCase)
_snake_case , _snake_case , _snake_case : int = weights[0], biases[0], self.out_projs[0]
_snake_case : Union[str, Any] = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : Any = hidden.new_empty((head_logit.size(0), self.n_token))
_snake_case : Optional[Any] = nn.functional.log_softmax(lowerCAmelCase , dim=1)
_snake_case : List[Any] = [0] + self.cutoffs
for i in range(len(lowerCAmelCase) - 1):
_snake_case , _snake_case : Any = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_snake_case : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
_snake_case , _snake_case , _snake_case : str = weights[i], biases[i], self.out_projs[i]
_snake_case : List[str] = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : str = nn.functional.log_softmax(lowerCAmelCase , dim=1)
_snake_case : Dict = head_logprob[:, -i] + tail_logprob_i
_snake_case : Any = logprob_i
return out
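# The class above implements Transformer-XL's projected adaptive log-softmax:
# frequent tokens live in a small head, rare tokens in progressively
# down-projected tail clusters. PyTorch ships an equivalent building block,
# which makes for a runnable illustration of the same idea:
import torch
from torch import nn

crit = nn.AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=10_000, cutoffs=[100, 1_000], div_value=4.0)
hidden = torch.randn(32, 64)
targets = torch.randint(0, 10_000, (32,))
out = crit(hidden, targets)  # out.output: per-sample target log-prob; out.loss: mean NLL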
| 317 | 0 |
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the key."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
print(f"The generated key is : {bbaa(8, seed=0)}")
from doctest import testmod
testmod()
| 232 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
'''simple docstring'''
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"
def __init__( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
super().__init__(image_processor=lowerCAmelCase , feature_extractor=lowerCAmelCase)
_snake_case : List[Any] = image_processor
_snake_case : List[Any] = feature_extractor
def __call__( self : Union[str, Any] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Dict=False , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Any , ) -> Any:
"""simple docstring"""
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""")
_snake_case : Union[str, Any] = None
if images is not None:
_snake_case : Any = self.image_processor(lowerCAmelCase , mask_pixel=lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase)
if images_mixed is not None:
_snake_case : Union[str, Any] = self.image_processor(lowerCAmelCase , is_mixed=lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase)
if audio is not None:
_snake_case : int = self.feature_extractor(
lowerCAmelCase , *lowerCAmelCase , sampling_rate=lowerCAmelCase , mask_audio=lowerCAmelCase , **lowerCAmelCase)
_snake_case : Any = {}
if audio is not None:
output_dict.update(lowerCAmelCase)
if images is not None:
output_dict.update(lowerCAmelCase)
if images_mixed_dict is not None:
output_dict.update(lowerCAmelCase)
return output_dict
@property
def UpperCamelCase_ ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_snake_case : Optional[Any] = self.image_processor.model_input_names
_snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
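# A hedged call sketch for the combined processor (argument names match the
# __call__ signature above; the checkpoint name is an example):
import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
video = [np.random.rand(8, 3, 224, 224)]  # one clip: (frames, channels, H, W)
audio = [np.random.rand(10_000)]          # one raw waveform
inputs = processor(images=video, audio=audio, sampling_rate=44_100, return_tensors="pt")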
| 317 | 0 |
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: 1 / (1/R1 + 1/R2 + ...)."""
    first_sum = 0.0_0
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: R1 + R2 + ..."""
    sum_r = 0.0_0
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
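# A worked example (my numbers): 2 Ω and 4 Ω give 1 / (1/2 + 1/4) = 4/3 Ω in
# parallel and 6 Ω in series.
assert abs(resistor_parallel([2, 4]) - 4 / 3) < 1e-9
assert resistor_series([2, 4]) == 6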
| 237 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple=7 , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Optional[Any]=18 , lowerCAmelCase : Dict=30 , lowerCAmelCase : Optional[int]=400 , lowerCAmelCase : List[str]=True , lowerCAmelCase : int=None , lowerCAmelCase : Tuple=True , lowerCAmelCase : Dict=None , ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : Optional[Any] = size if size is not None else {"""shortest_edge""": 20}
_snake_case : Any = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_snake_case : Optional[Any] = parent
_snake_case : Tuple = batch_size
_snake_case : int = num_channels
_snake_case : List[Any] = image_size
_snake_case : Dict = min_resolution
_snake_case : List[Any] = max_resolution
_snake_case : List[Any] = do_resize
_snake_case : Any = size
_snake_case : str = do_center_crop
_snake_case : Union[str, Any] = crop_size
def UpperCamelCase_ ( self : int) -> str:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : Any) -> Optional[Any]:
"""simple docstring"""
_snake_case : str = MobileNetVaImageProcessingTester(self)
@property
def UpperCamelCase_ ( self : int) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : List[Any]) -> str:
"""simple docstring"""
_snake_case : int = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase , """do_resize"""))
self.assertTrue(hasattr(lowerCAmelCase , """size"""))
self.assertTrue(hasattr(lowerCAmelCase , """do_center_crop"""))
self.assertTrue(hasattr(lowerCAmelCase , """crop_size"""))
def UpperCamelCase_ ( self : List[str]) -> List[Any]:
"""simple docstring"""
_snake_case : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""shortest_edge""": 20})
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18})
_snake_case : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {"""shortest_edge""": 42})
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84})
def UpperCamelCase_ ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase_ ( self : Dict) -> str:
"""simple docstring"""
_snake_case : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image)
# Test not batched input
_snake_case : int = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_snake_case : Dict = image_processing(lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase_ ( self : int) -> List[Any]:
"""simple docstring"""
_snake_case : int = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray)
# Test not batched input
_snake_case : int = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_snake_case : str = image_processing(lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCamelCase_ ( self : str) -> List[str]:
"""simple docstring"""
_snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor)
# Test not batched input
_snake_case : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_snake_case : int = image_processing(lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 317 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class _lowercase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='utf-8' , check=snake_case , )
assert hasattr(self , 'env' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Any ) -> str:
"""simple docstring"""
UpperCamelCase_ : str = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
UpperCamelCase_ : Dict = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=snake_case , instance_count=snake_case , instance_type=self.instance_type , debugger_hook_config=snake_case , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=snake_case , py_version='py36' , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Any ) -> List[Any]:
"""simple docstring"""
TrainingJobAnalytics(snake_case ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )
@parameterized.expand([(2,)] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : List[str] ) -> str:
"""simple docstring"""
UpperCamelCase_ : Dict = self.create_estimator(snake_case )
# run training
estimator.fit()
# result dataframe
UpperCamelCase_ : List[str] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCamelCase_ : Dict = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
UpperCamelCase_ : List[str] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCamelCase_ : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , snake_case )
| 175 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
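
# Example (added): constructing the config defined above. The vocabulary size mirrors the
# public xlm-roberta checkpoints but is only illustrative here.
if __name__ == "__main__":
    config = XLMRobertaConfig(vocab_size=250002)
    print(config.model_type, config.hidden_size, config.num_attention_heads)  # xlm-roberta 768 12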
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
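
# Worked example (added): a quick round-trip check of the two functions above, using the
# classic "^BANANA" input.
def _bwt_roundtrip_demo() -> None:
    data = bwt_transform("^BANANA")
    assert data["bwt_string"] == "BNN^AAA"
    assert reverse_bwt(data["bwt_string"], data["idx_original_string"]) == "^BANANA"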
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
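
# Example (added): user code reaches the builder above through the public `load_dataset` API;
# the path below is a placeholder, not a file shipped with this module.
def load_example_csv(path: str = "data/train.csv"):
    from datasets import load_dataset

    return load_dataset("csv", data_files={"train": path}, sep=",")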
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items'),
pytest.param(_overwrite_items , id='overwrite items'),
pytest.param(_delete_items , id='delete items'),
pytest.param(_access_absent_items , id='access absent items'),
pytest.param(_add_with_resize_up , id='add with resize up'),
pytest.param(_add_with_resize_down , id='add with resize down'),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
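
# Demo (added): exercising _run_operation directly against a plain dict; the values noted in
# the comments are what the helpers above produce.
if __name__ == "__main__":
    demo = {}
    print(_run_operation(demo, *_set("key", "value")))  # (None, None): setitem returns None
    print(_run_operation(demo, *_get("key")))           # ('value', None)
    print(_run_operation(demo, *_get("missing")))       # (None, KeyError('missing'))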
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step function."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """The variance exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
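
# Sketch (added): the predictor-corrector loop this scheduler is designed for, modeled on the
# score-SDE sampling procedure. `model` is a hypothetical score network; shapes and the step
# count are illustrative only.
def sde_ve_sample(model, scheduler, shape, num_inference_steps=100, device="cpu"):
    sample = torch.randn(*shape, device=device) * scheduler.init_noise_sigma
    scheduler.set_timesteps(num_inference_steps, device=device)
    scheduler.set_sigmas(num_inference_steps)
    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(shape[0], device=device)
        # corrector: a few Langevin MCMC steps at the current noise level
        for _ in range(scheduler.config.correct_steps):
            score = model(sample, sigma_t)
            sample = scheduler.step_correct(score, sample).prev_sample
        # predictor: one reverse-diffusion step
        score = model(sample, sigma_t)
        sample = scheduler.step_pred(score, t, sample).prev_sample
    return sample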
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
a__ = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    """Squeeze and Excitation layer (SE)."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    """RegNet's layer composed of three convolutions, a ResNet-style bottleneck with reduction = 1."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with Squeeze and Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    """A RegNet stage composed of stacked layers."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
a__ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
a__ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
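
# Smoke test (added): a minimal sketch of running the classification model above on random
# pixels. The default RegNetConfig values and the 224x224 input size are assumptions.
def _regnet_smoke_test() -> None:
    config = RegNetConfig(num_labels=10)
    model = RegNetForImageClassification(config).eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224)).logits
    assert logits.shape == (1, 10)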
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
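
# Usage note (added, illustrative): with the hooks above wired in, duration/failure reports
# are written when the suite is launched with the shared flag, e.g.:
#   pytest tests/ --make-reports=nightly_run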
def prefix_function(input_string: str) -> list:
    """
    Knuth-Morris-Pratt prefix function: for each position, the length of the longest proper
    prefix of the string that is also a suffix ending at that position.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Return the length of the longest prefix that occurs again as a substring."""
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
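
# Worked example (added): prefix_function on "aabcdaabc" — the final value 4 reflects the
# prefix "aabc" reappearing at the end of the string.
def _prefix_demo() -> None:
    assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert longest_prefix("aabcdaabc") == 4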
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
'''
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
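
# Worked example (added): with the default scale_factor=8, sizes are divided by 8**2 = 64 and
# rounded up, then re-multiplied by 8 to give the latent resolution:
#   downscale_height_and_width(512, 512) == (64, 64)
#   downscale_height_and_width(500, 500) == (64, 64)  # 500 is not a multiple of 64, so it rounds up
assert downscale_height_and_width(512, 512) == (64, 64)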
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for image generation using Kandinsky 2.2 with ControlNet conditioning.

    Args:
        scheduler ([`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]): MoVQ decoder to generate the image from the latents.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
a__ = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
a__ = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak model's weights to our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)

    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
a__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
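# Hypothetical invocation of this conversion script (the script filename follows the
# usual transformers naming convention and is an assumption here, as is the output folder):
#
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py \
#       bart.large.cnn ./bart-large-cnn --hf_config facebook/bart-large-cnn
#
# The resulting folder can then be loaded with
# BartForConditionalGeneration.from_pretrained("./bart-large-cnn").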
| 317 | 0 |
import datasets

from .evaluate import evaluate


_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
    title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
    author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
    booktitle={EMNLP},
    year={2016}
}
"""

_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).

Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""

_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': the text of the answer
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the SQuAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
Examples:

    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
    >>> squad_metric = datasets.load_metric("squad")
    >>> results = squad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Squad(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
            reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
        )

    def _compute(self, predictions, references):
        # Reshape flat references into the nested SQuAD v1 dataset layout that the
        # official evaluate() script expects.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
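# Usage sketch (mirrors the docstring example above; `datasets.load_metric` is the
# legacy API and may be superseded by the standalone `evaluate` library in newer setups):
#
#   squad_metric = datasets.load_metric("squad")
#   results = squad_metric.compute(
#       predictions=[{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}],
#       references=[{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}],
#   )
#   assert results == {"exact_match": 100.0, "f1": 100.0}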
| 285 |
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
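# Minimal sketch of how the shim behaves for callers (assumes the transformers package
# context; constructor arguments are whatever SegformerImageProcessor accepts):
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       extractor = SegformerFeatureExtractor()  # works exactly like the image processor
#   assert any(issubclass(w.category, FutureWarning) for w in caught)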
| 317 | 0 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
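# Worked example of the four checks on a hypothetical file list (illustrative names only):
#
#   filepaths = ["maths/Prime.py", "sorts/bubble sort.py", "graphs/a-star.py", "license.md"]
#   upper_files  -> ["maths/Prime.py"]         (uppercase character)
#   space_files  -> ["sorts/bubble sort.py"]   (space in the name)
#   hyphen_files -> ["graphs/a-star.py"]       (hyphen in the name)
#   nodir_files  -> ["license.md"]             (no directory separator)
#   The script would then call sys.exit(4), failing CI with the offending-file count.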
| 190 |
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 317 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
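# Sketch of what the lazy structure buys (illustrative; the exact attribute-resolution
# mechanics live inside _LazyModule and are assumed here, not shown):
#
#   import transformers.models.vit as vit  # cheap: no heavy dependencies imported yet
#   vit.ViTConfig                          # first access triggers import of configuration_vit
#   vit.ViTModel                           # imports modeling_vit, and only if torch is available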
| 253 |
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 317 | 0 |