import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        # In the vocab defined in setUp: 'lo' -> 10, 'w' -> 2, 'er</w>' -> 16,
        # 'n' -> 9, 'e' -> 3 and '<unk>' -> 20, which gives the ids below.
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    '\u0009',  # (horizontal tab, '\t')
                    '\u000B',  # (vertical tab)
                    '\u000C',  # (form feed)
                    '\u0020',  # (space, ' ')
                    '\u200E',  # (left-to-right mark)
                    '\u200F',  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    '\u000A',  # (line feed, '\n')
                    '\r\n',  # (carriage return and line feed, '\r\n')
                    '\u000D',  # (carriage return, '\r')
                    '\r',  # (carriage return, '\r')
                    '\u000D',  # (carriage return, '\r')
                    '\u2028',  # (line separator)
                    '\u2029',  # (paragraph separator)
                    # '\u0085',  # (next line)
                ]
                # The tokenization is not identical for the character '\u0085' (next line). The slow version using ftfy
                # transforms it into the Horizontal Ellipsis character '…' ('\u2026') while the fast version transforms
                # it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')

        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.'
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass


import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return its contents as a string of bits."""
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Replace the matched string by its two extensions, widening codes when the index hits a power of two."""
    lexicon.pop(curr_string)
    lexicon[curr_string + '0'] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]

    lexicon[curr_string + '1'] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress the given string of bits with the Lempel-Ziv scheme used by this module."""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ''

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the original file length (in binary, padded with leading zeros) to the compressed stream."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string to a file, padding the final byte with a '1' followed by '0's."""
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress its bits and write the result to the destination file."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
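
    # Quick self-check (a sketch based on this reconstruction of the algorithm,
    # not part of the original CLI): on a tiny input the growing code table
    # dominates, so the "compressed" stream can be longer than the input.
    assert compress_data('0000') == '00000'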
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, 'html.parser')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('div', attrs={'data-tn-component': 'organicJob'}):
        job_title = job.find('a', attrs={'data-tn-element': 'jobTitle'}).text.strip()
        company_name = job.find('span', {'class': 'company'}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs('Bangalore'), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
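
# Because fetch_jobs is a generator, results can also be consumed lazily; a
# small illustrative sketch (kept as a comment, since it performs network I/O):
#
#     from itertools import islice
#
#     for title, company in islice(fetch_jobs('mumbai'), 3):
#         print(title, '@', company)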
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the substring divisibility property from Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-to-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int(''.join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
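
    # Sanity check from the problem statement: 1406357289 is known to have the
    # substring divisibility property (357 % 7 == 572 % 11 == 728 % 13 == 289 % 17 == 0),
    # so the predicate should accept it.
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))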
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'''
_KWARGS_DESCRIPTION = '''\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'''
_CITATION = '''\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('int32')),
                    'references': datasets.Sequence(datasets.Value('int32')),
                }
                if self.config_name == 'multilabel'
                else {
                    'predictions': datasets.Value('int32'),
                    'references': datasets.Value('int32'),
                }
            ),
            reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'],
        )
    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
# Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()


def reverse_floyd(n: int) -> None:
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')


def pretty_print(n: int) -> None:
    """Print the full diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
    print(r'''| /\ | |- | |- |--| |\ /| |-''')
    print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))

    print('Good Bye...')
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor ):
super().__init__()
self.register_modules(
vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )
def enable_attention_slicing( self , slice_size = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size )
def disable_attention_slicing( self ):
self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , text_embeddings = None , **kwargs , ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = 1
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = len(_UpperCAmelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(_UpperCAmelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(_UpperCAmelCase )}.""" )
# get prompt text embeddings
__snake_case : int = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case : List[str] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__snake_case : Optional[Any] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case : Dict = text_embeddings.shape
__snake_case : Any = text_embeddings.repeat(1 , _UpperCAmelCase , 1 )
__snake_case : Any = text_embeddings.view(bs_embed * num_images_per_prompt , _UpperCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Union[str, Any] = [""""""]
elif type(_UpperCAmelCase ) is not type(_UpperCAmelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(_UpperCAmelCase )} !="""
F""" {type(_UpperCAmelCase )}.""" )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Any = [negative_prompt]
elif batch_size != len(_UpperCAmelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(_UpperCAmelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
__snake_case : Optional[int] = negative_prompt
__snake_case : List[Any] = text_input_ids.shape[-1]
__snake_case : Tuple = self.tokenizer(
_UpperCAmelCase , padding='max_length' , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='pt' , )
__snake_case : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Union[str, Any] = uncond_embeddings.shape[1]
__snake_case : Tuple = uncond_embeddings.repeat(_UpperCAmelCase , _UpperCAmelCase , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , _UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : int = torch.randn(
_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(self.device )
__snake_case : Union[str, Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to(
self.device )
else:
__snake_case : List[str] = torch.randn(
_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
__snake_case : List[str] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__snake_case : List[str] = latents_reference.to(self.device )
__snake_case : Optional[int] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case : int = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case : Optional[Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case : Any = 0 if dx < 0 else dx
__snake_case : Tuple = 0 if dy < 0 else dy
__snake_case : Optional[Any] = max(-dx , 0 )
__snake_case : int = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__snake_case : Optional[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : List[Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Dict = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : Any = {}
if accepts_eta:
__snake_case : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
__snake_case : Any = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case : List[Any] = noise_pred.chunk(2 )
__snake_case : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Dict = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case : Union[str, Any] = 1 / 0.18215 * latents
__snake_case : Dict = self.vae.decode(_UpperCAmelCase ).sample
__snake_case : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case : Optional[int] = self.feature_extractor(self.numpy_to_pil(_UpperCAmelCase ) , return_tensors='pt' ).to(
self.device )
__snake_case : Tuple = self.safety_checker(
images=_UpperCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case : int = None
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
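
# A minimal usage sketch (an assumption for illustration: this class matches the
# "seed resize" community pipeline interface; the checkpoint name below is an
# example, not taken from this file):
#
#     import torch
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained(
#         'runwayml/stable-diffusion-v1-5',
#         custom_pipeline='seed_resize_stable_diffusion',
#         torch_dtype=torch.float16,
#     ).to('cuda')
#     image = pipe('an astronaut riding a horse', height=512, width=768).images[0]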
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the set bits of a non-negative integer by clearing the lowest set bit each step."""
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the set bits of a non-negative integer by checking the parity of each bit."""
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    """Benchmark both bit-count implementations with timeit."""

    def do_benchmark(number: int) -> None:
        setup = 'import __main__ as z'
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
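
    # Worked example of Kernighan's identity n & (n - 1): 13 is 0b1101;
    # 13 & 12 = 0b1100, 12 & 11 = 0b1000, 8 & 7 = 0, so the loop stops after
    # three iterations, hence three set bits.
    assert get_set_bits_count_using_brian_kernighans_algorithm(13) == 3
    assert get_set_bits_count_using_modulo_operator(13) == 3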
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
T5Config,
T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')

    return image
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding'))
    rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding'))
    rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight'))
    rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias'))
    rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight'))
    rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias'))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight'))
    rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias'))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf', vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf', vocab_size=32001).to_dict()
    else:
        raise ValueError('Model name not supported')

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', truncation_side='left')
    qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained('google/flan-t5-xl', truncation_side='left')
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            'huggyllama/llama-7b', truncation_side='left', bos_token='</s>', unk_token='</s>'
        )
        tokenizer.add_special_tokens({'pad_token': '[PAD]'})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
        'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
        'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
        'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
    }
    name, type = model_name_to_original[model_name]

    # load original model
    print('Loading original model...')
    hf_model_device = 'cuda:1' if torch.cuda.is_available() else 'cpu'
    lavis_device = 'cuda:2' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print('Done!')
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "llm_proj" in key:
            key = key.replace('llm_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('llm_model'):
            key = key.replace('llm_model', 'language_model')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = 'What is unusual about this image?'

    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors='pt').to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']}
            ).logits
            label_input_ids = tokenizer('\n', return_tensors='pt').input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print('First values of original logits:', original_logits[0, :3, :3])
    print('First values of HF logits:', logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if 'vicuna' in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print('Looks ok!')

    print('Generating with original model...')
    original_outputs = original_model.generate({'image': original_pixel_values, 'prompt': prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print('Generating with HF model...')
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print('Original generation:', original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        'instructblip-vicuna-7b',
        'instructblip-vicuna-13b',
        'instructblip-flan-t5-xl',
        'instructblip-flan-t5-xxl',
    ]
    parser.add_argument(
        '--model_name',
        default='instructblip-flan-t5-xl',
        choices=choices,
        type=str,
        help='Path to hf config.json of model to convert',
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub after converting',
    )
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
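
# Example invocation (illustrative; the script filename below is hypothetical,
# since it is not given in this file):
#
#     python convert_instructblip_original_to_pytorch.py \
#         --model_name instructblip-flan-t5-xl \
#         --pytorch_dump_folder_path ./instructblip-flan-t5-xl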
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('test requires faiss')(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex')(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch')(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy')(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch')(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow')(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX')(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow')(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers')(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken')(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy')(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip('test requires spacy')(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark')(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark')(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow')(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local')(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged')(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote')(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('test'):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:')),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Dict = 2_95_00
__snake_case : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
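# Illustrative sketch (not part of the original utils) of how the two helpers
# above compose under pytest-xdist: each worker id like "gw0", "gw1" maps to a
# distinct port so parallel torch.distributed tests do not collide. The worker
# id "gw3" below is hypothetical.
import os
import re
os.environ.setdefault('PYTEST_XDIST_WORKER' , 'gw3' )
worker_delta = int(re.sub(r'^gw' , '' , os.environ['PYTEST_XDIST_WORKER'] ) )
print(29_500 + worker_delta ) # -> 29503 on worker gw3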
| 679 | 0 |
from __future__ import annotations
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any ):
__snake_case : List[str] = list(range(len(_lowerCAmelCase ) ) )
__snake_case : Dict = [v / w for v, w in zip(_lowerCAmelCase , _lowerCAmelCase )]
index.sort(key=lambda i : ratio[i] , reverse=True ) # sort indices by value/weight ratio, descending
__snake_case : float = 0
__snake_case : list[float] = [0] * len(_lowerCAmelCase )
for i in index:
if weight[i] <= capacity:
__snake_case : Union[str, Any] = 1
max_value += value[i]
capacity -= weight[i]
else:
__snake_case : Optional[Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
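# A worked example of the greedy rule implemented above (numbers are
# illustrative): with values [60, 100, 120], weights [10, 20, 30] and
# capacity 50, the ratios are 6.0, 5.0 and 4.0, so the first two items are
# taken whole and 20/30 of the third item fills the remaining capacity.
print(60 + 100 + 120 * 20 / 30 ) # -> 240.0, the expected max_value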
| 716 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = data
__snake_case : Node[T] | None = None
def __str__( self ):
return F"""{self.data}"""
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
__snake_case : Node[T] | None = None
def __iter__( self ):
__snake_case : List[str] = self.top
while node:
yield node.data
__snake_case : Union[str, Any] = node.next
def __str__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def lowercase_ ( self ):
return self.top is None
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Any = Node(_UpperCAmelCase )
if not self.is_empty():
__snake_case : Any = self.top
__snake_case : Dict = node
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
__snake_case : Optional[int] = self.top
__snake_case : Dict = self.top.next
return pop_node.data
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowercase_ ( self ):
__snake_case : Optional[int] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
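# Hypothetical usage sketch, assuming the two classes above keep their
# upstream names Node and Stack (a generic linked-list stack):
# stack = Stack[int]()
# stack.push(1); stack.push(2); stack.push(3)
# str(stack) -> '3->2->1'
# stack.pop() -> 3
# stack.peek() -> 2
# len(stack) -> 2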
| 679 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
__magic_name__ = logging.getLogger()
def UpperCAmelCase__( ):
__snake_case : int = argparse.ArgumentParser()
parser.add_argument('-f' )
__snake_case : Optional[int] = parser.parse_args()
return args.f
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : List[str] = {}
__snake_case : int = os.path.join(__UpperCAmelCase , 'all_results.json' )
if os.path.exists(__UpperCAmelCase ):
with open(__UpperCAmelCase , 'r' ) as f:
__snake_case : Dict = json.load(__UpperCAmelCase )
else:
raise ValueError(F"""can\'t find {path}""" )
return results
def UpperCAmelCase__( ):
__snake_case : Union[str, Any] = torch.cuda.is_available() and torch_device == 'cuda'
return is_using_cuda and is_apex_available()
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase__):
"""simple docstring"""
@classmethod
def lowercase_ ( cls ):
# Write an Accelerate config; it will be picked up on CPU, GPU, and multi-GPU
__snake_case : List[Any] = tempfile.mkdtemp()
__snake_case : Union[str, Any] = os.path.join(cls.tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
__snake_case : List[Any] = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def lowercase_ ( cls ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.get_auto_remove_tmp_dir()
__snake_case : Optional[int] = F"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__snake_case : str = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase_ ( self ):
__snake_case : List[Any] = self.get_auto_remove_tmp_dir()
__snake_case : Dict = F"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model, and it would need drop_last to work.
return
run_command(self._launch_args + testargs )
__snake_case : List[Any] = get_results(_UpperCAmelCase )
self.assertLess(result['perplexity'] , 100 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase_ ( self ):
__snake_case : Dict = self.get_auto_remove_tmp_dir()
__snake_case : Any = F"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__snake_case : Union[str, Any] = get_results(_UpperCAmelCase )
self.assertLess(result['perplexity'] , 42 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase_ ( self ):
# With so little data, distributed training needs more epochs to get a score on par with a 0/1-GPU run
__snake_case : Dict = 7 if get_gpu_count() > 1 else 2
__snake_case : Any = self.get_auto_remove_tmp_dir()
__snake_case : List[Any] = F"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__snake_case : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase_ ( self ):
__snake_case : List[Any] = self.get_auto_remove_tmp_dir()
__snake_case : List[Any] = F"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__snake_case : Optional[Any] = get_results(_UpperCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 28 )
self.assertGreaterEqual(result['eval_exact'] , 28 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase_ ( self ):
__snake_case : str = self.get_auto_remove_tmp_dir()
__snake_case : List[str] = F"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__snake_case : Union[str, Any] = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.get_auto_remove_tmp_dir()
__snake_case : Optional[int] = F"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__snake_case : Any = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result['eval_rouge1'] , 10 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase_ ( self ):
__snake_case : List[Any] = self.get_auto_remove_tmp_dir()
__snake_case : Any = F"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
__snake_case : int = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result['eval_bleu'] , 30 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'translation_no_trainer' ) ) )
@slow
def lowercase_ ( self ):
__snake_case : List[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(_UpperCAmelCase )
__snake_case : str = self.get_auto_remove_tmp_dir()
__snake_case : Tuple = F"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
__snake_case : Tuple = get_results(_UpperCAmelCase )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.10 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase_ ( self ):
__snake_case : str = self.get_auto_remove_tmp_dir()
__snake_case : Optional[int] = F"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__snake_case : Optional[Any] = get_results(_UpperCAmelCase )
# The base model scores 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , 'image_classification_no_trainer' ) ) )
| 717 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
__snake_case : Tuple = self.dummy_prior
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : str = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
__snake_case : Optional[int] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
# NOTE: larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ) -> Tuple:
__snake_case : Union[str, Any] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# closed-form formula for the sum of an arithmetic series
return total
def UpperCAmelCase__( ) -> Tuple:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
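# A worked example of the closed-form above: with first_term=1, common_diff=1
# and num_of_terms=10 the sum is (10 / 2) * (2 * 1 + 9 * 1) = 55.0, matching
# the series 1 + 2 + ... + 10 summed directly.
print((10 / 2) * (2 * 1 + (10 - 1) * 1) ) # -> 55.0
print(sum(range(1 , 11 ) ) ) # -> 55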
| 718 | import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Any ):
# Initialise PyTorch model
__snake_case : List[str] = TaConfig.from_json_file(__UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__snake_case : int = TaForConditionalGeneration(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save the PyTorch model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
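# Example invocation (the script name and paths below are hypothetical):
# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /tmp/t5/model.ckpt \
#     --config_file /tmp/t5/config.json \
#     --pytorch_dump_path /tmp/t5/pytorch_model.bin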
| 679 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __a):
"""simple docstring"""
__UpperCAmelCase = ["pixel_values"]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PIL.Image.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
super().__init__(**lowerCAmelCase_ )
__snake_case : Optional[int] = size if size is not None else {'height': 256, 'width': 256}
__snake_case : Optional[int] = get_size_dict(lowerCAmelCase_ )
__snake_case : Any = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__snake_case : List[Any] = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__snake_case : Dict = do_resize
__snake_case : Dict = size
__snake_case : Union[str, Any] = resample
__snake_case : str = do_center_crop
__snake_case : Union[str, Any] = crop_size
__snake_case : Dict = do_rescale
__snake_case : List[str] = rescale_factor
__snake_case : Tuple = do_normalize
__snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PIL.Image.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : Optional[Any] = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
lowerCAmelCase_ , size=(size['height'], size['width']) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
__snake_case : List[Any] = do_resize if do_resize is not None else self.do_resize
__snake_case : Union[str, Any] = resample if resample is not None else self.resample
__snake_case : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case : Dict = do_rescale if do_rescale is not None else self.do_rescale
__snake_case : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case : Tuple = do_normalize if do_normalize is not None else self.do_normalize
__snake_case : str = image_mean if image_mean is not None else self.image_mean
__snake_case : Any = image_std if image_std is not None else self.image_std
__snake_case : Optional[Any] = size if size is not None else self.size
__snake_case : str = get_size_dict(lowerCAmelCase_ )
__snake_case : int = crop_size if crop_size is not None else self.crop_size
__snake_case : List[str] = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__snake_case : int = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__snake_case : Dict = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
__snake_case : int = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
__snake_case : Optional[Any] = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
__snake_case : Tuple = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
__snake_case : Optional[Any] = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
__snake_case : Optional[int] = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__snake_case : Optional[Any] = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
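# A minimal numpy sketch (separate from the processor above) of the rescale +
# normalize arithmetic that `preprocess` applies per image, assuming the
# IMAGENET_STANDARD mean/std of 0.5 per channel:
import numpy as np
image = np.random.randint(0 , 256 , (3, 224, 224) ).astype(np.float32 )
rescaled = image * (1 / 255) # map [0, 255] -> [0, 1]
normalized = (rescaled - 0.5) / 0.5 # (x - mean) / std -> [-1, 1]
print(normalized.min() >= -1.0 and normalized.max() <= 1.0 ) # -> True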
| 719 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
__snake_case : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
__snake_case : str = 0
__snake_case : List[str] = 0
__snake_case : int = 0
__snake_case : Tuple = 0
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = threshold
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = patience
def lowercase_ ( self ):
__snake_case : Dict = 0
__snake_case : Dict = 0
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
__snake_case : int = nn.Dropout(config.hidden_dropout_prob )
__snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
__snake_case : List[str] = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : int = (logits[-1],)
if labels is not None:
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
__snake_case : List[str] = MSELoss()
__snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case : int = (total_loss / total_weights,) + outputs
return outputs
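# A self-contained sketch (illustrative tensors, not the module above) of the
# PABEE patience rule for classification: exit early once the argmax
# prediction has been stable for `patience` consecutive layers.
import torch
patience , patient_counter , patient_result = 2 , 0 , None
layer_logits = [torch.tensor([[0.1, 0.9]] ), torch.tensor([[0.2, 0.8]] ), torch.tensor([[0.3, 0.7]] )]
for i, logits in enumerate(layer_logits ):
    labels = logits.argmax(dim=1 )
    if patient_result is not None and torch.all(labels.eq(patient_result.argmax(dim=1 ) ) ):
        patient_counter += 1
    else:
        patient_counter = 0
    patient_result = logits
    if patient_counter == patience:
        print(F"""exited after layer {i}""" ) # -> exited after layer 2
        break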
| 679 | 0 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
@staticmethod
def lowercase_ ( *_UpperCAmelCase , **_UpperCAmelCase ):
pass
def UpperCAmelCase__( __UpperCAmelCase : Image ):
__snake_case : List[Any] = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = DepthEstimationPipeline(model=__a , image_processor=__a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Union[str, Any] = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , __a )
import datasets
__snake_case : Union[str, Any] = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__snake_case : Optional[int] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , __a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def lowercase_ ( self ):
pass
@slow
@require_torch
def lowercase_ ( self ):
__snake_case : str = """Intel/dpt-large"""
__snake_case : List[Any] = pipeline('depth-estimation' , model=__a )
__snake_case : List[str] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
__snake_case : Dict = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def lowercase_ ( self ):
# It is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 720 | def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
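# Illustrative behaviour, assuming the function above keeps its upstream name
# is_isogram: every letter may appear at most once, case-insensitively.
# is_isogram('Uncopyrightable') -> True
# is_isogram('allowance') -> False ('a' and 'l' repeat)
# is_isogram('abc1') -> raises ValueError (non-alphabetic character)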
| 679 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__magic_name__ = logging.get_logger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : int ):
if isinstance(_UpperCamelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_UpperCamelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_UpperCamelCase ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ["pixel_values"]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
super().__init__(**UpperCAmelCase__ )
__snake_case : str = size if size is not None else {'shortest_edge': 256}
__snake_case : int = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
__snake_case : Union[str, Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__snake_case : Tuple = get_size_dict(UpperCAmelCase__ , param_name='crop_size' )
__snake_case : Optional[int] = do_resize
__snake_case : Optional[Any] = size
__snake_case : Optional[int] = do_center_crop
__snake_case : Any = crop_size
__snake_case : Tuple = resample
__snake_case : int = do_rescale
__snake_case : int = rescale_factor
__snake_case : Optional[int] = offset
__snake_case : Optional[Any] = do_normalize
__snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[str] = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" in size:
__snake_case : List[str] = get_resize_output_image_size(UpperCAmelCase__ , size['shortest_edge'] , default_to_square=UpperCAmelCase__ )
elif "height" in size and "width" in size:
__snake_case : Optional[Any] = (size['height'], size['width'])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : str = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(UpperCAmelCase__ , size=(size['height'], size['width']) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : Optional[Any] = image.astype(np.float32 )
if offset:
__snake_case : Optional[int] = image - (scale / 2)
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , ):
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
__snake_case : Optional[int] = to_numpy_array(UpperCAmelCase__ )
if do_resize:
__snake_case : Any = self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ )
if do_center_crop:
__snake_case : Union[str, Any] = self.center_crop(UpperCAmelCase__ , size=UpperCAmelCase__ )
if do_rescale:
__snake_case : List[str] = self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ , offset=UpperCAmelCase__ )
if do_normalize:
__snake_case : Union[str, Any] = self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ )
__snake_case : str = to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ )
return image
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
__snake_case : Optional[Any] = do_resize if do_resize is not None else self.do_resize
__snake_case : int = resample if resample is not None else self.resample
__snake_case : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case : List[str] = do_rescale if do_rescale is not None else self.do_rescale
__snake_case : str = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case : int = offset if offset is not None else self.offset
__snake_case : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
__snake_case : List[str] = image_mean if image_mean is not None else self.image_mean
__snake_case : Any = image_std if image_std is not None else self.image_std
__snake_case : Optional[int] = size if size is not None else self.size
__snake_case : Dict = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
__snake_case : str = crop_size if crop_size is not None else self.crop_size
__snake_case : List[str] = get_size_dict(UpperCAmelCase__ , param_name='crop_size' )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
__snake_case : Optional[int] = make_batched(UpperCAmelCase__ )
__snake_case : Any = [
[
self._preprocess_image(
image=UpperCAmelCase__ , do_resize=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , do_center_crop=UpperCAmelCase__ , crop_size=UpperCAmelCase__ , do_rescale=UpperCAmelCase__ , rescale_factor=UpperCAmelCase__ , offset=UpperCAmelCase__ , do_normalize=UpperCAmelCase__ , image_mean=UpperCAmelCase__ , image_std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , )
for img in video
]
for video in videos
]
__snake_case : Any = {'pixel_values': videos}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
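# Illustrative shapes accepted by the batching helper above (the arrays stand
# in for video frames; the name `frame` is hypothetical):
# frame              -> [[frame]] (1 video, 1 frame)
# [frame, frame]     -> [[frame, frame]] (1 video, 2 frames)
# [[frame], [frame]] -> unchanged (already a batch of 2 videos)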
| 721 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "retribert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = share_encoders
__snake_case : Optional[Any] = projection_dim
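# A minimal sketch of round-tripping such a config through a dict, using the
# generic PretrainedConfig base directly (the subclass above is masked; the
# field values below are illustrative):
from transformers import PretrainedConfig
cfg = PretrainedConfig(projection_dim=128 , share_encoders=True )
restored = PretrainedConfig.from_dict(cfg.to_dict() )
print(restored.projection_dim ) # -> 128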
| 679 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( __a):
"""simple docstring"""
__UpperCAmelCase = "codegen"
__UpperCAmelCase = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=50_400 , _UpperCAmelCase=2_048 , _UpperCAmelCase=2_048 , _UpperCAmelCase=4_096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=50_256 , _UpperCAmelCase=50_256 , _UpperCAmelCase=False , **_UpperCAmelCase , ):
__snake_case : List[Any] = vocab_size
__snake_case : List[str] = n_ctx
__snake_case : Tuple = n_positions
__snake_case : List[str] = n_embd
__snake_case : Any = n_layer
__snake_case : Dict = n_head
__snake_case : List[Any] = n_inner
__snake_case : List[str] = rotary_dim
__snake_case : List[Any] = activation_function
__snake_case : List[Any] = resid_pdrop
__snake_case : List[str] = embd_pdrop
__snake_case : Optional[Any] = attn_pdrop
__snake_case : str = layer_norm_epsilon
__snake_case : Dict = initializer_range
__snake_case : Any = use_cache
__snake_case : Optional[Any] = bos_token_id
__snake_case : Optional[int] = eos_token_id
super().__init__(
bos_token_id=A__ , eos_token_id=A__ , tie_word_embeddings=A__ , **A__ )
class __SCREAMING_SNAKE_CASE ( __a):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
super().__init__(A__ , task=A__ , patching_specs=A__ , use_past=A__ )
if not getattr(self._config , 'pad_token_id' , A__ ):
# TODO: how to do that better?
__snake_case : Optional[Any] = 0
@property
def lowercase_ ( self ):
__snake_case : str = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(A__ , direction='inputs' )
__snake_case : List[Any] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__snake_case : Union[str, Any] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def lowercase_ ( self ):
return self._config.n_layer
@property
def lowercase_ ( self ):
return self._config.n_head
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
__snake_case : Optional[int] = super(A__ , self ).generate_dummy_inputs(
A__ , batch_size=A__ , seq_length=A__ , is_pair=A__ , framework=A__ )
# We need to order the inputs in the way they appear in the forward()
__snake_case : Optional[Any] = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__snake_case , __snake_case : Optional[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__snake_case : Optional[Any] = seqlen + 2
__snake_case : Optional[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__snake_case : List[Any] = [
(torch.zeros(A__ ), torch.zeros(A__ )) for _ in range(self.num_layers )
]
__snake_case : str = common_inputs['attention_mask']
if self.use_past:
__snake_case : Any = ordered_inputs['attention_mask'].dtype
__snake_case : List[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(A__ , A__ , dtype=A__ )] , dim=1 )
return ordered_inputs
@property
def lowercase_ ( self ):
return 13
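# Shape sketch for each dummy past key/value tensor built above, assuming
# hypothetical values n_head=16, n_embd=1_024, batch=2, seq_length=8:
# (batch, num_heads, seq_length + 2, hidden_size // num_heads) = (2, 16, 10, 64)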
| 700 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
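# A self-contained sketch of the lazy-import idea behind _LazyModule: resolve
# a module only on first attribute access (the 'json' module here is just an
# illustration; the class name is hypothetical).
import importlib
class LazyAttr:
    def __init__(self , module_name ):
        self._module_name , self._module = module_name , None
    def __getattr__(self , name ):
        if self._module is None: # the import happens only on first access
            self._module = importlib.import_module(self._module_name )
        return getattr(self._module , name )
lazy_json = LazyAttr('json' )
print(lazy_json.dumps({'lazy': True} ) ) # 'json' is imported here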
| 679 | 0 |
from __future__ import annotations
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple = None , __UpperCAmelCase : int = None , __UpperCAmelCase : List[str] = False , ):
    __snake_case : Union[str, Any] = cipher_alphabet or [chr(i ) for i in range(97 , 1_23 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
__snake_case : int = {
'a': 0.08497,
'b': 0.01492,
'c': 0.02202,
'd': 0.04253,
'e': 0.11162,
'f': 0.02228,
'g': 0.02015,
'h': 0.06094,
'i': 0.07546,
'j': 0.00153,
'k': 0.01292,
'l': 0.04025,
'm': 0.02406,
'n': 0.06749,
'o': 0.07507,
'p': 0.01929,
'q': 0.00095,
'r': 0.07587,
's': 0.06327,
't': 0.09356,
'u': 0.02758,
'v': 0.00978,
'w': 0.02560,
'x': 0.00150,
'y': 0.01994,
'z': 0.00077,
}
else:
# Custom frequencies dictionary
__snake_case : Union[str, Any] = frequencies_dict
if not case_sensitive:
__snake_case : Union[str, Any] = ciphertext.lower()
# Chi squared statistic values
__snake_case : List[str] = {}
# cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
__snake_case : List[Any] = ''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                __snake_case : Union[str, Any] = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
__snake_case : Any = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
__snake_case : int = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
                    __snake_case : Optional[int] = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
__snake_case : Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__snake_case : Any = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
                    __snake_case : Any = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
__snake_case : Union[str, Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__snake_case : Any = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
__snake_case : str = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key : str ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    __snake_case : Optional[int] = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
# Get all the data from the most likely cipher (key, decoded message)
    __snake_case , __snake_case : Dict = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
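# Hedged usage sketch (the example string is hypothetical): "khoor zruog" is
# "hello world" Caesar-shifted by 3, so the chi-squared scan above should
# recover shift 3 along with the decoded plaintext:
#
#   shift, chi_squared_value, plaintext = UpperCAmelCase__('khoor zruog')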
| 701 | import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ):
__snake_case : List[str] = parent
__snake_case : Tuple = batch_size
__snake_case : str = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : List[str] = last_hidden_size
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = conv_kernel_size
__snake_case : int = output_stride
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : str = use_labels
__snake_case : Optional[Any] = is_training
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
__snake_case : Union[str, Any] = scope
def lowercase_ ( self ):
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.num_labels
__snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = MobileViTModelTester(self )
__snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[Any] = outputs.hidden_states
__snake_case : str = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : Optional[Any] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
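# Quick manual smoke test outside the unittest harness (a sketch; the
# checkpoint name is the same one exercised by the slow tests above):
#
#   processor = MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small')
#   model = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small')
#   inputs = processor(images=prepare_img(), return_tensors='pt')
#   predicted_class = model(**inputs).logits.argmax(-1).item()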
| 679 | 0 |
from __future__ import annotations
import requests
def get_hackernews_story( story_id : int ):
    __snake_case : int = F"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
    return requests.get(url ).json()
def hackernews_top_stories( max_stories : int = 10 ):
    __snake_case : List[str] = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    __snake_case : Any = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown( max_stories : int = 10 ):
    __snake_case : str = hackernews_top_stories(max_stories )
    return "\n".join('* [{title}]({url})'.format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
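# The endpoints above belong to the public Firebase-hosted Hacker News API and
# need no authentication. Each story costs one extra request, so callers may
# want a timeout, e.g. requests.get(url, timeout=10).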
 | 702 | def decimal_to_fraction( decimal : int | float | str ):
    try:
        __snake_case : int = float(decimal )
except ValueError:
raise ValueError('Please enter a valid number' )
    __snake_case : Any = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        __snake_case : Tuple = len(str(decimal ).split('.' )[1] )
__snake_case : Tuple = int(decimal * (10**number_of_frac_digits) )
__snake_case : List[Any] = 10**number_of_frac_digits
__snake_case , __snake_case : List[Any] = denominator, numerator
while True:
__snake_case : Any = dividend % divisor
if remainder == 0:
break
__snake_case , __snake_case : Optional[int] = divisor, remainder
__snake_case , __snake_case : Union[str, Any] = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
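# Worked example: 6.25 has two fractional digits, so it becomes 625/100; the
# Euclidean loop finds gcd(625, 100) = 25 and the result is (25, 4). Note that
# the final print above passes "78td", which is expected to raise the
# ValueError from the float() conversion.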
| 679 | 0 |
def hamming_distance( string1 : str , string2 : str ):
    if len(string1 ) != len(string2 ):
raise ValueError('String lengths must match!' )
__snake_case : List[str] = 0
    for char1 , char2 in zip(string1 , string2 ):
        if char1 != char2:
count += 1
return count
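# Classic example: hamming_distance('karolin', 'kathrin') == 3, since the
# strings differ at positions 2, 3 and 4.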
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 | import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase_ ( self ):
if self.train_file is not None:
__snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case : List[str] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , _UpperCAmelCase ):
__snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
__snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
__snake_case : List[Any] = len(_UpperCAmelCase )
__snake_case : Union[str, Any] = len(features[0]['input_ids'] )
__snake_case : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
__snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )
__snake_case : Optional[Any] = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
__snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
        __snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.int64 )
return batch
def UpperCAmelCase__( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__snake_case : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : Optional[int] = {}
if data_args.train_file is not None:
__snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : int = data_args.validation_file
__snake_case : int = data_args.train_file.split('.' )[-1]
__snake_case : Tuple = load_dataset(
__UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : str = [F"""ending{i}""" for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : Tuple = 'sent2'
if data_args.max_seq_length is None:
__snake_case : List[Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
__snake_case : List[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
__snake_case : Union[str, Any] = examples[question_header_name]
__snake_case : Optional[int] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
]
# Flatten out
__snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
__snake_case : int = list(chain(*__UpperCAmelCase ) )
# Tokenize
__snake_case : Tuple = tokenizer(
__UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(__UpperCAmelCase : int ):
__snake_case , __snake_case : Union[str, Any] = eval_predictions
__snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
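# Hypothetical invocation (model name and output path are placeholders; the
# flags map to the dataclasses and TrainingArguments parsed above):
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --output_dir /tmp/swag_out \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3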
| 679 | 0 |
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__magic_name__ = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    __snake_case : List[Any] = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
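# pytest discovers pytest_addoption / pytest_terminal_summary purely by name,
# so the two hook implementations above only take effect if this conftest.py
# keeps those exact names and sits where pytest collects it.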
| 704 | import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ):
    __snake_case : List[str] = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ):
    with open(path , 'r' ) as f:
        return json.load(f )
def save_json( data : List[Any] , path : str ):
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
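# Hedged usage sketch (the public class name is assumed, since this dump
# renames the class; the checkpoint comes from the pretrained map above):
#
#   tokenizer = Speech2TextTokenizer.from_pretrained('facebook/s2t-small-librispeech-asr')
#   ids = tokenizer('hello world').input_ids
#   text = tokenizer.decode(ids)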
| 679 | 0 |
'''simple docstring'''
def solution( n : int = 60_08_51_47_51_43 ):
    try:
        __snake_case : Optional[Any] = int(n )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
__snake_case : List[str] = 2
__snake_case : Tuple = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__snake_case : Optional[Any] = i
while n % i == 0:
__snake_case : Union[str, Any] = n // i
i += 1
    return int(ans )
if __name__ == "__main__":
print(F'''{solution() = }''')
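# Worked example: solution(13195) strips the factors 5, 7 and 13 in turn and
# returns 29, the largest prime factor.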
 | 705 | def odd_even_transposition( arr : list ):
    __snake_case : List[Any] = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__magic_name__ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
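# Odd-even transposition is a bubble-sort variant: passes alternate between
# comparing (even, odd) and (odd, even) index pairs, so every comparison within
# a pass is independent of the others (which is why the algorithm parallelizes
# well), and arr_size passes are enough to guarantee a sorted array.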
| 679 | 0 |
def is_power_of_two( number : int ):
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
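# Bit trick: for number > 0, number - 1 flips the lowest set bit and every bit
# below it, so number & (number - 1) == 0 exactly when a single bit is set.
# The expression also returns True for 0, which callers may want to
# special-case.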
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
        __snake_case : List[Any] = list(filter(lambda t : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
        __snake_case : Dict = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
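# The Perceiver tokenizer operates on raw utf-8 bytes (offset by a handful of
# special tokens), which is why a single id such as 178 above decodes to the
# replacement character '�': one byte of a multi-byte character is not valid
# utf-8 on its own.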
| 679 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = MgpstrTokenizer
__UpperCAmelCase = False
__UpperCAmelCase = {}
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
# fmt: off
__snake_case : Tuple = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
__snake_case : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
def lowercase_ ( self , **_UpperCAmelCase ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = "tester"
__snake_case : Optional[int] = "tester"
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[int] = self.get_tokenizers(do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Tuple = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({'cls_token': special_token} )
__snake_case : List[str] = tokenizer.encode([special_token] , add_special_tokens=__a )
self.assertEqual(len(__a ) , 1 )
__snake_case : str = tokenizer.decode(__a , skip_special_tokens=__a )
self.assertTrue(special_token not in decoded )
def lowercase_ ( self ):
__snake_case : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Dict = self.get_input_output_texts(__a )
__snake_case : Tuple = tokenizer.tokenize(__a )
__snake_case : Dict = tokenizer.convert_tokens_to_ids(__a )
__snake_case : str = tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
__snake_case : Dict = tokenizer.convert_ids_to_tokens(__a )
self.assertNotEqual(len(__a ) , 0 )
__snake_case : Dict = tokenizer.decode(__a )
self.assertIsInstance(__a , __a )
self.assertEqual(text_a.replace(' ' , '' ) , __a )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def lowercase_ ( self ):
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def lowercase_ ( self ):
pass
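# Hedged usage sketch for the tokenizer under test (alibaba-damo/mgp-str-base
# is the public checkpoint; network access required):
if __name__ == "__main__":
    mgp_tokenizer = MgpstrTokenizer.from_pretrained('alibaba-damo/mgp-str-base')
    token_ids = mgp_tokenizer('tester')['input_ids']
    print(mgp_tokenizer.decode(token_ids))  # -> 'tester'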
| 707 | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
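# Hedged usage sketch through the public `datasets` API (these classes are
# exported as `Translation` and `TranslationVariableLanguages`); guarded so it
# only runs when the module is executed directly:
if __name__ == "__main__":
    from datasets import Dataset, Features, Translation

    features = Features({'translation': Translation(languages=['de', 'en'])})
    ds = Dataset.from_dict(
        {'translation': [{'de': 'die katze', 'en': 'the cat'}]}, features=features
    )
    print(ds[0]['translation'])  # {'de': 'die katze', 'en': 'the cat'}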
| 679 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = ['''model.decoder.embed_positions.weights''']
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if "emb" in name:
__snake_case : Any = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
__snake_case : Union[str, Any] = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
__snake_case : List[str] = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
__snake_case : str = name.replace('linear1' , 'fc1' )
if "linear2" in name:
__snake_case : int = name.replace('linear2' , 'fc2' )
if "norm1" in name:
__snake_case : Union[str, Any] = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
__snake_case : Optional[int] = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
__snake_case : str = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
__snake_case : Optional[int] = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
__snake_case : Optional[int] = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
__snake_case : Tuple = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : Dict ):
__snake_case : Optional[int] = list(state_dict.keys() )
__snake_case : Union[str, Any] = {}
for key in keys:
__snake_case : str = state_dict.pop(__UpperCamelCase )
__snake_case : Dict = rename_keys(__UpperCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
__snake_case : Union[str, Any] = val[:hidden_size, :]
__snake_case : Any = val[hidden_size : 2 * hidden_size, :]
__snake_case : str = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__snake_case : List[Any] = val
else:
__snake_case : int = val
return state_dict, enc_dec_proj_state_dict
def UpperCAmelCase__( __UpperCAmelCase : int ):
if checkpoint == "small":
# default config values
__snake_case : Optional[int] = 10_24
__snake_case : List[Any] = 24
__snake_case : Optional[int] = 16
elif checkpoint == "medium":
__snake_case : Optional[int] = 15_36
__snake_case : str = 48
__snake_case : Union[str, Any] = 24
elif checkpoint == "large":
__snake_case : Optional[int] = 20_48
__snake_case : str = 48
__snake_case : int = 32
else:
raise ValueError(F"""Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.""" )
__snake_case : Dict = MusicgenDecoderConfig(
hidden_size=__UpperCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__UpperCamelCase , num_attention_heads=__UpperCamelCase , )
return config
@torch.no_grad()
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Optional[int]="cpu" ):
__snake_case : List[Any] = MusicGen.get_pretrained(__UpperCamelCase , device=__UpperCamelCase )
__snake_case : Union[str, Any] = decoder_config_from_checkpoint(__UpperCamelCase )
__snake_case : Optional[int] = fairseq_model.lm.state_dict()
__snake_case , __snake_case : int = rename_state_dict(
__UpperCamelCase , hidden_size=decoder_config.hidden_size )
__snake_case : List[Any] = TaEncoderModel.from_pretrained('t5-base' )
__snake_case : Union[str, Any] = EncodecModel.from_pretrained('facebook/encodec_32khz' )
__snake_case : Tuple = MusicgenForCausalLM(__UpperCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__snake_case , __snake_case : Dict = decoder.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
for key in missing_keys.copy():
if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(__UpperCamelCase ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
__snake_case : Union[str, Any] = MusicgenForConditionalGeneration(text_encoder=__UpperCamelCase , audio_encoder=__UpperCamelCase , decoder=__UpperCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__UpperCamelCase )
# check we can do a forward pass
__snake_case : str = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__snake_case : int = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__snake_case : List[Any] = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase ).logits
if logits.shape != (8, 1, 20_48):
raise ValueError('Incorrect shape for logits' )
# now construct the processor
__snake_case : int = AutoTokenizer.from_pretrained('t5-base' )
__snake_case : Optional[Any] = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
__snake_case : Dict = MusicgenProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase )
# set the appropriate bos/pad token ids
__snake_case : str = 20_48
__snake_case : str = 20_48
# set other default generation config params
__snake_case : Optional[int] = int(30 * audio_encoder.config.frame_rate )
__snake_case : str = True
__snake_case : List[Any] = 3.0
if pytorch_dump_folder is not None:
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(__UpperCamelCase )
processor.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
__magic_name__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
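# Hedged invocation example (the script filename is an assumption; pass a repo
# id via --push_to_hub to also upload the converted weights):
#
#   python convert_musicgen_to_transformers.py \
#       --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small \
#       --device cpu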
| 708 | from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
    ] # the closed (visited) grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
    __snake_case : Union[str, Any] = g + heuristic[x][y] # estimated total cost: path cost so far plus heuristic to the goal
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
    __snake_case : str = False # flag set if we can't expand any further
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
        else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
                # obstacles get a large penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
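# Hedged helper sketch: the heuristic grid built inline above is just the
# Manhattan distance to the goal, with obstacle cells forced to a large
# penalty so the search steers around them.
def manhattan_heuristic(grid: list[list[int]], goal: list[int], penalty: int = 99) -> list[list[int]]:
    """
    >>> manhattan_heuristic([[0, 1], [0, 0]], [1, 1])
    [[2, 99], [1, 0]]
    """
    return [
        [penalty if cell == 1 else abs(i - goal[0]) + abs(j - goal[1]) for j, cell in enumerate(row)]
        for i, row in enumerate(grid)
    ]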
| 679 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( FeatureExtractionMixin):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
__snake_case : Dict = feature_size
__snake_case : Union[str, Any] = sampling_rate
__snake_case : Optional[Any] = padding_value
__snake_case : Optional[Any] = kwargs.pop('padding_side' , 'right' )
__snake_case : str = kwargs.pop('return_attention_mask' , _lowerCAmelCase )
super().__init__(**_lowerCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
if isinstance(_lowerCAmelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__snake_case : List[str] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
__snake_case : Tuple = processed_features[self.model_input_names[0]]
__snake_case : Optional[Any] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_lowerCAmelCase ) == 0:
if return_attention_mask:
__snake_case : List[Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__snake_case : Dict = required_input[0]
if isinstance(_lowerCAmelCase , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
__snake_case : Union[str, Any] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_lowerCAmelCase ):
__snake_case : str = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_lowerCAmelCase ):
__snake_case : List[Any] = 'tf'
elif is_torch_tensor(_lowerCAmelCase ):
__snake_case : List[str] = 'pt'
elif isinstance(_lowerCAmelCase , (int, float, list, tuple, np.ndarray) ):
__snake_case : Union[str, Any] = 'np'
else:
raise ValueError(
F"""type of {first_element} unknown: {type(_lowerCAmelCase )}. """
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__snake_case : List[str] = to_numpy(_lowerCAmelCase )
else:
__snake_case : str = [to_numpy(_lowerCAmelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
__snake_case : Optional[int] = self._get_padding_strategies(padding=_lowerCAmelCase , max_length=_lowerCAmelCase )
__snake_case : Dict = processed_features[self.model_input_names[0]]
__snake_case : int = len(_lowerCAmelCase )
if not all(len(_lowerCAmelCase ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
__snake_case : List[Any] = []
for i in range(_lowerCAmelCase ):
__snake_case : str = {k: v[i] for k, v in processed_features.items()}
# truncation
__snake_case : Optional[Any] = self._truncate(
_lowerCAmelCase , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , truncation=_lowerCAmelCase , )
truncated_inputs.append(_lowerCAmelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__snake_case : int = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__snake_case : int = PaddingStrategy.MAX_LENGTH
__snake_case : Union[str, Any] = {}
for i in range(_lowerCAmelCase ):
# padding
__snake_case : str = self._pad(
truncated_inputs[i] , max_length=_lowerCAmelCase , padding_strategy=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
__snake_case : List[str] = []
if value.dtype is np.dtype(np.floataa ):
__snake_case : Optional[Any] = value.astype(np.floataa )
batch_outputs[key].append(_lowerCAmelCase )
return BatchFeature(_lowerCAmelCase , tensor_type=_lowerCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = PaddingStrategy.DO_NOT_PAD , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
__snake_case : str = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__snake_case : Any = len(_lowerCAmelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_lowerCAmelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__snake_case : List[Any] = np.ones(len(_lowerCAmelCase ) , dtype=np.intaa )
if needs_to_be_padded:
__snake_case : List[Any] = max_length - len(_lowerCAmelCase )
if self.padding_side == "right":
if return_attention_mask:
__snake_case : Optional[int] = np.pad(
processed_features['attention_mask'] , (0, difference) )
__snake_case : Optional[int] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__snake_case : List[str] = np.pad(
_lowerCAmelCase , _lowerCAmelCase , 'constant' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__snake_case : Optional[Any] = np.pad(
processed_features['attention_mask'] , (difference, 0) )
__snake_case : Optional[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__snake_case : List[Any] = np.pad(
_lowerCAmelCase , _lowerCAmelCase , 'constant' , constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
__snake_case : Any = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case : Any = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case : str = len(_lowerCAmelCase ) > max_length
if needs_to_be_truncated:
__snake_case : Dict = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__snake_case : Any = processed_features['attention_mask'][:max_length]
return processed_features
def lowercase_ ( self , _UpperCAmelCase=False , _UpperCAmelCase=None ):
if padding is not False:
if padding is True:
__snake_case : List[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : Tuple = PaddingStrategy(_lowerCAmelCase )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : Optional[int] = padding
else:
__snake_case : Dict = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
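# Self-contained sketch (plain numpy) of the right-padding behaviour that
# `_pad` above implements: pad every 1-D feature array to the batch maximum
# with `padding_value` and build the matching attention mask.
def pad_batch_sketch(batch, padding_value=0.0):
    """
    >>> values, mask = pad_batch_sketch([[0.1, 0.2, 0.3], [0.4]])
    >>> mask.tolist()
    [[1, 1, 1], [1, 0, 0]]
    """
    max_len = max(len(x) for x in batch)
    padded = [
        np.pad(np.asarray(x, dtype=np.float32), (0, max_len - len(x)), constant_values=padding_value)
        for x in batch
    ]
    masks = [np.pad(np.ones(len(x), dtype=np.int32), (0, max_len - len(x))) for x in batch]
    return np.stack(padded), np.stack(masks)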
| 709 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( PretrainedConfig):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( PretrainedConfig):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
            logger.info('vision_config is None. Initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
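# Hedged composition sketch via the public transformers API, which this file
# mirrors (`from_vision_qformer_text_configs` is the real classmethod name):
if __name__ == "__main__":
    from transformers import (
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
        OPTConfig,
    )

    demo_config = InstructBlipConfig.from_vision_qformer_text_configs(
        vision_config=InstructBlipVisionConfig(),
        qformer_config=InstructBlipQFormerConfig(),
        text_config=OPTConfig(),
    )
    print(demo_config.num_query_tokens)  # 32 by default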
| 679 | 0 |
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self ):
__snake_case : int = 0
__snake_case : Any = 0
__snake_case : Union[str, Any] = {}
def lowercase_ ( self , _UpperCAmelCase ):
if vertex not in self.adjacency:
__snake_case : Any = {}
self.num_vertices += 1
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
self.add_vertex(_a )
self.add_vertex(_a )
if head == tail:
return
__snake_case : Optional[int] = weight
__snake_case : int = weight
def lowercase_ ( self ):
__snake_case : List[str] = self.get_edges()
for edge in edges:
__snake_case : Tuple = edge
edges.remove((tail, head, weight) )
for i in range(len(_a ) ):
__snake_case : Optional[int] = list(edges[i] )
        edges.sort(key=lambda _UpperCAmelCase : _UpperCAmelCase[2] )
for i in range(len(_a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
__snake_case : Optional[Any] = edges[i][2] + 1
for edge in edges:
__snake_case : Any = edge
__snake_case : Any = weight
__snake_case : Optional[Any] = weight
def __str__( self ):
__snake_case : Any = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
__snake_case : str = self.adjacency[head][tail]
string += F"""{head} -> {tail} == {weight}\n"""
return string.rstrip('\n' )
def lowercase_ ( self ):
__snake_case : str = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def lowercase_ ( self ):
return self.adjacency.keys()
@staticmethod
def lowercase_ ( _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : str = Graph()
if vertices is None:
__snake_case : List[Any] = []
if edges is None:
__snake_case : Optional[int] = []
for vertex in vertices:
g.add_vertex(_a )
for edge in edges:
g.add_edge(*_a )
return g
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self ):
__snake_case : int = {}
__snake_case : List[str] = {}
def __len__( self ):
return len(self.parent )
def lowercase_ ( self , _UpperCAmelCase ):
if item in self.parent:
return self.find(_a )
__snake_case : Union[str, Any] = item
__snake_case : Any = 0
return item
def lowercase_ ( self , _UpperCAmelCase ):
if item not in self.parent:
return self.make_set(_a )
if item != self.parent[item]:
__snake_case : Tuple = self.find(self.parent[item] )
return self.parent[item]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = self.find(_a )
__snake_case : Any = self.find(_a )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
__snake_case : Optional[int] = roota
return roota
if self.rank[roota] < self.rank[roota]:
__snake_case : Optional[int] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
__snake_case : Optional[int] = roota
return roota
return None
@staticmethod
def lowercase_ ( _UpperCAmelCase ):
__snake_case : List[Any] = graph.num_vertices
__snake_case : Optional[int] = Graph.UnionFind()
__snake_case : List[str] = []
while num_components > 1:
__snake_case : Union[str, Any] = {}
for vertex in graph.get_vertices():
__snake_case : List[Any] = -1
__snake_case : List[str] = graph.get_edges()
for edge in edges:
__snake_case : Any = edge
edges.remove((tail, head, weight) )
for edge in edges:
__snake_case : int = edge
__snake_case : Tuple = union_find.find(_a )
__snake_case : str = union_find.find(_a )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__snake_case : Optional[Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__snake_case : List[str] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
__snake_case : Union[str, Any] = cheap_edge[vertex]
if union_find.find(_a ) != union_find.find(_a ):
union_find.union(_a , _a )
mst_edges.append(cheap_edge[vertex] )
__snake_case : Any = num_components - 1
__snake_case : List[str] = Graph.build(edges=_a )
return mst
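# Standalone sketch of the same Borůvka idea on a plain edge list (the class
# above is awkward to call directly because its method names were normalised).
# Assumes distinct edge weights, which the weight-adjustment method enforces.
def boruvka_sketch(n_vertices, edges):
    """
    >>> boruvka_sketch(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)])
    [(0, 1, 1), (1, 2, 2), (2, 3, 3)]
    """
    parent = list(range(n_vertices))

    def find(v):
        while parent[v] != v:
            parent[v] = parent[parent[v]]  # path halving
            v = parent[v]
        return v

    mst, components = [], n_vertices
    while components > 1:
        cheapest = [None] * n_vertices
        for u, v, w in edges:
            root_u, root_v = find(u), find(v)
            if root_u != root_v:
                for root in (root_u, root_v):
                    if cheapest[root] is None or cheapest[root][2] > w:
                        cheapest[root] = (u, v, w)
        for edge in cheapest:
            if edge is not None and find(edge[0]) != find(edge[1]):
                parent[find(edge[0])] = find(edge[1])
                mst.append(edge)
                components -= 1
    return mst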
| 710 | import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( BeitImageProcessor):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.' , FutureWarning , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
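# Migration sketch implied by the deprecation warning above (hedged; the
# checkpoint is the public microsoft/beit-base-patch16-224):
#
#   from transformers import BeitImageProcessor
#   image_processor = BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224')
#   inputs = image_processor(images=image, return_tensors='pt')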
| 679 | 0 |
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )
def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n) ) )
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""" , setup='import __main__' )
        print(F"""{call:56} = {func(value )} -- {timing:.4f} seconds""" )
    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
 | 711 | import math
import os
import sys
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Union[str, Any] = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Tuple = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
lexicon.pop(__UpperCAmelCase )
__snake_case : Union[str, Any] = last_match_id
if math.loga(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case : Tuple = '0' + lexicon[curr_key]
__snake_case : Any = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Tuple = {'0': '0', '1': '1'}
__snake_case , __snake_case : Optional[int] = '', ''
__snake_case : str = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
__snake_case : Union[str, Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = os.path.getsize(__UpperCAmelCase )
__snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
__snake_case : Any = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : Tuple = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
__snake_case : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = read_file_binary(__UpperCAmelCase )
__snake_case : Tuple = compress_data(__UpperCAmelCase )
__snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
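# Hedged invocation example (the script name is an assumption). The pipeline
# above reads the input file as a bit string, LZW-compresses it, prepends the
# original bit length, and writes the packed bytes:
#
#   python lzw_compress.py source.bin destination.lzw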
| 679 | 0 |
def search(list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
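# Worked trace of the two-ended recursion above for search([1, 3, 5, 7, 9], 7):
#   (left=0, right=4): the ends are 1 and 9 -> recurse inward
#   (left=1, right=3): list_data[3] == 7 -> return 3
# An absent key, e.g. 2, keeps shrinking the window until left > right (-1).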
| 712 | from itertools import permutations
def is_substring_divisible(num: tuple ) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10 ) -> int:
    return sum(
        int(''.join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
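# Worked check: 1406357289 (the example from the Project Euler statement)
# passes every window test above:
#   406 % 2 == 0, 63 % 3 == 0, 635 % 5 == 0, 357 % 7 == 0,
#   572 % 11 == 0, 728 % 13 == 0, 289 % 17 == 0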
| 679 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __SCREAMING_SNAKE_CASE ( BaseOutput):
"""simple docstring"""
__UpperCAmelCase = 4_2
class __SCREAMING_SNAKE_CASE ( ModelMixin , ConfigMixin):
"""simple docstring"""
__UpperCAmelCase = True
@register_to_config
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = ("DownEncoderBlock2D",) , _UpperCAmelCase = ("UpDecoderBlock2D",) , _UpperCAmelCase = (64,) , _UpperCAmelCase = 1 , _UpperCAmelCase = "silu" , _UpperCAmelCase = 4 , _UpperCAmelCase = 32 , _UpperCAmelCase = 32 , _UpperCAmelCase = 0.18215 , ):
super().__init__()
# pass init params to Encoder
__snake_case : Union[str, Any] = Encoder(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , down_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , act_fn=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , double_z=_UpperCAmelCase , )
# pass init params to Decoder
__snake_case : int = Decoder(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , up_block_types=_UpperCAmelCase , block_out_channels=_UpperCAmelCase , layers_per_block=_UpperCAmelCase , norm_num_groups=_UpperCAmelCase , act_fn=_UpperCAmelCase , )
__snake_case : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__snake_case : List[Any] = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , 1 )
__snake_case : List[str] = False
__snake_case : List[str] = False
# only relevant if vae tiling is enabled
__snake_case : str = self.config.sample_size
__snake_case : Optional[Any] = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__snake_case : str = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__snake_case : int = 0.25
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
if isinstance(_UpperCAmelCase , (Encoder, Decoder) ):
__snake_case : List[str] = value
def lowercase_ ( self , _UpperCAmelCase = True ):
__snake_case : Any = use_tiling
def lowercase_ ( self ):
self.enable_tiling(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Any = True
def lowercase_ ( self ):
__snake_case : Any = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase_ ( self ):
__snake_case : List[Any] = {}
def fn_recursive_add_processors(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if hasattr(_UpperCAmelCase , 'set_processor' ):
__snake_case : int = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" , _UpperCAmelCase , _UpperCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return processors
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[str] = len(self.attn_processors.keys() )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_UpperCAmelCase )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if hasattr(_UpperCAmelCase , 'set_processor' ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
module.set_processor(_UpperCAmelCase )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" , _UpperCAmelCase , _UpperCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCAmelCase , return_dict=_UpperCAmelCase )
if self.use_slicing and x.shape[0] > 1:
__snake_case : Optional[int] = [self.encoder(_UpperCAmelCase ) for x_slice in x.split(1 )]
__snake_case : Union[str, Any] = torch.cat(_UpperCAmelCase )
else:
__snake_case : Any = self.encoder(_UpperCAmelCase )
__snake_case : str = self.quant_conv(_UpperCAmelCase )
__snake_case : Dict = DiagonalGaussianDistribution(_UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCAmelCase , return_dict=_UpperCAmelCase )
__snake_case : Dict = self.post_quant_conv(_UpperCAmelCase )
__snake_case : List[str] = self.decoder(_UpperCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
@apply_forward_hook
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = True ):
if self.use_slicing and z.shape[0] > 1:
__snake_case : str = [self._decode(_UpperCAmelCase ).sample for z_slice in z.split(1 )]
__snake_case : Any = torch.cat(_UpperCAmelCase )
else:
__snake_case : List[Any] = self._decode(_UpperCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = min(a.shape[2] , b.shape[2] , _UpperCAmelCase )
for y in range(_UpperCAmelCase ):
__snake_case : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Union[str, Any] = min(a.shape[3] , b.shape[3] , _UpperCAmelCase )
for x in range(_UpperCAmelCase ):
__snake_case : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = True ):
__snake_case : int = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__snake_case : Optional[int] = int(self.tile_latent_min_size * self.tile_overlap_factor )
__snake_case : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__snake_case : List[Any] = []
for i in range(0 , x.shape[2] , _UpperCAmelCase ):
__snake_case : List[str] = []
for j in range(0 , x.shape[3] , _UpperCAmelCase ):
__snake_case : Union[str, Any] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__snake_case : List[Any] = self.encoder(_UpperCAmelCase )
__snake_case : Optional[Any] = self.quant_conv(_UpperCAmelCase )
row.append(_UpperCAmelCase )
rows.append(_UpperCAmelCase )
__snake_case : Tuple = []
for i, row in enumerate(_UpperCAmelCase ):
__snake_case : Tuple = []
for j, tile in enumerate(_UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case : List[Any] = self.blend_v(rows[i - 1][j] , _UpperCAmelCase , _UpperCAmelCase )
if j > 0:
__snake_case : List[str] = self.blend_h(row[j - 1] , _UpperCAmelCase , _UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCAmelCase , dim=3 ) )
__snake_case : Tuple = torch.cat(_UpperCAmelCase , dim=2 )
__snake_case : Dict = DiagonalGaussianDistribution(_UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = True ):
__snake_case : Tuple = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__snake_case : Any = int(self.tile_sample_min_size * self.tile_overlap_factor )
__snake_case : List[str] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__snake_case : List[Any] = []
for i in range(0 , z.shape[2] , _UpperCAmelCase ):
__snake_case : Tuple = []
for j in range(0 , z.shape[3] , _UpperCAmelCase ):
__snake_case : Optional[Any] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__snake_case : Union[str, Any] = self.post_quant_conv(_UpperCAmelCase )
__snake_case : Tuple = self.decoder(_UpperCAmelCase )
row.append(_UpperCAmelCase )
rows.append(_UpperCAmelCase )
__snake_case : Optional[int] = []
for i, row in enumerate(_UpperCAmelCase ):
__snake_case : Optional[int] = []
for j, tile in enumerate(_UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case : List[str] = self.blend_v(rows[i - 1][j] , _UpperCAmelCase , _UpperCAmelCase )
if j > 0:
__snake_case : str = self.blend_h(row[j - 1] , _UpperCAmelCase , _UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCAmelCase , dim=3 ) )
__snake_case : Union[str, Any] = torch.cat(_UpperCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , ):
__snake_case : Any = sample
__snake_case : Optional[Any] = self.encode(_UpperCAmelCase ).latent_dist
if sample_posterior:
__snake_case : Dict = posterior.sample(generator=_UpperCAmelCase )
else:
__snake_case : int = posterior.mode()
__snake_case : Optional[int] = self.decode(_UpperCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
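# Hedged usage sketch through the public diffusers API, which this file
# mirrors (`enable_tiling` and `enable_slicing` are the real method names):
if __name__ == "__main__":
    import torch
    from diffusers import AutoencoderKL

    vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse')
    vae.enable_tiling()  # large inputs are encoded/decoded tile by tile with blending
    with torch.no_grad():
        latents = vae.encode(torch.randn(1, 3, 512, 512)).latent_dist.sample()
    print(latents.shape)  # torch.Size([1, 4, 64, 64])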
| 713 | # Function to print upper half of diamond (pyramid)
def floyd(n ):
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ): # printing spaces
            print(' ' , end='' )
        for _ in range(0 , i + 1 ): # printing stars
            print('* ' , end='' )
        print()
def reverse_floyd(n ):
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ): # printing stars
            print('* ' , end='' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
            print(' ' , end='' )
def pretty_print(n ):
    if n <= 0:
        print(' ... .... nothing printing :(' )
        return
    floyd(n ) # upper half
    reverse_floyd(n ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('''enter the number and see the magic : ''') )
        print()
        pretty_print(user_number )
        K = int(input('''press 0 to exit... and 1 to continue...''') )
print('''Good Bye...''')
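# For example, pretty_print(3) prints (each star is followed by a space):
#
#   *
#  * *
# * * *
# * * *
#  * *
#   *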
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''PoolFormerFeatureExtractor''']
__magic_name__ = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 714 | from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int ) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative' )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int ) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative' )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number: int ) -> None:
        setup = 'import __main__ as z'
        print(F"""Benchmark when {number = }:""" )
        print(F"""{get_set_bits_count_using_modulo_operator(number ) = }""" )
        timing = timeit(F"""z.get_set_bits_count_using_modulo_operator({number})""" , setup=setup )
        print(F"""timeit() runs in {timing} seconds""" )
        print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }""" )
        timing = timeit(
            F"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""" , setup=setup , )
        print(F"""timeit() runs in {timing} seconds""" )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
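# Worked example: 25 = 0b11001, so both counters return 3. Brian Kernighan's
# trick clears the lowest set bit on every iteration:
#   25 & 24 = 0b11000 (24); 24 & 23 = 0b10000 (16); 16 & 15 = 0 -> 3 steps.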
| 679 | 0 |
__magic_name__ = '''\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and\n# uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'''
__magic_name__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__magic_name__ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 715 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key , default=False ):
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""" )
    return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers')(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken')(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy')(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip('test requires spacy')(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark')(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark')(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow')(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local')(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged')(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote')(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('test'):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
from enum import Enum


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1E-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""")
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', F"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
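# A minimal usage sketch of the `offline` helper above (illustrative only;
# `load_dataset` stands in for any code that would otherwise hit the network):
#
#     with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=1e-16):
#         load_dataset('squad')  # raises instead of hanging indefinitely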
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='stderr:')),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=1_80, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""")
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""")
    return result
def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker, or 0 when xdist is not used."""
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker = re.sub(r'^gw', '', worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_upernet'] = [
        'UperNetForSemanticSegmentation',
        'UperNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
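# Usage note (illustrative): with the lazy structure above, the submodule that
# defines `UperNetForSemanticSegmentation` is only imported (and torch only
# required) on the first attribute access of this package.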
| 716 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class Node(Generic[T]):
"""simple docstring"""
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None
def __str__( self ):
return F"""{self.data}"""
class Stack(Generic[T]):
"""simple docstring"""
def __init__( self ):
        self.top: Node[T] | None = None
def __iter__( self ):
        node = self.top
        while node:
            yield node.data
            node = node.next
def __str__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
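    # A quick usage sketch of the stack above (values are arbitrary):
    stack: Stack[int] = Stack()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)  # 3->2->1
    assert stack.pop() == 3
    assert stack.peek() == 2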
| 679 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
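# A short usage sketch (illustrative; requires Pillow and a real image file):
#
#     from PIL import Image
#     processor = CLIPImageProcessor()
#     batch = processor(images=Image.open('cat.png'), return_tensors='np')
#     batch['pixel_values'].shape  # (1, 3, 224, 224)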
| 717 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ['prompt']
    batch_params = ['prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp', num_train_timesteps=1_024, prediction_type='sample', use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def test_shap_e(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent(self):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy')
        pipe = ShapEPipeline.from_pretrained('openai/shap-e')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            'a shark', generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 679 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, F"""{split}.{field}"""), 'w') as f:
                    f.write(content)
    def _run_finetune(self, gpus, distributed_retriever='pytorch'):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, 'output')
        data_dir = os.path.join(tmp_dir, 'data')
        self._create_dummy_data(data_dir=data_dir)
        testargs = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())
        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
| 718 | import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
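    # Example invocation (script name and paths are hypothetical):
    #   python convert_t5_tf_checkpoint.py \
    #       --tf_checkpoint_path /tmp/t5/model.ckpt \
    #       --config_file /tmp/t5/config.json \
    #       --pytorch_dump_path /tmp/t5-pytorch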
| 679 | 0 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''')
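    # Both qubits are flipped to |1> by the X gates before measurement, so every
    # shot collapses to '11' and the printed counts should be {'11': 1000}.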
| 719 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.', BERT_START_DOCSTRING, )
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
            F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """, BERT_START_DOCSTRING, )
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)])
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
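# A minimal inference sketch (illustrative; the checkpoint path is a placeholder):
#
#     model = BertForSequenceClassificationWithPabee.from_pretrained('path/to/pabee-checkpoint')
#     model.bert.set_patience(3)  # exit once 3 consecutive layers agree on the label
#     model.eval()
#     logits = model(input_ids=input_ids)[0]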
| 679 | 0 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
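    # Worked example: the intersection is {'c', 'd', 'e'} (size 3) and the union
    # has 8 elements, so the line above prints 0.375. With alternative_union=True
    # the denominator is len(set_a) + len(set_b) = 11:
    print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 0.2727...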
| 720 | def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
    print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
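    # For example, is_isogram('Uncopyrightable') is True (no letter repeats),
    # while is_isogram('letter') is False.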
| 679 | 0 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number))
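# Sanity check: 4150 is one of the numbers counted by solution(), since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.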
if __name__ == "__main__":
print(solution())
| 721 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'yjernite/retribert-base-uncased': (
        'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
    ),
}
class RetriBertConfig(PretrainedConfig):
    model_type = 'retribert'
    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
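# A short usage sketch (illustrative):
#
#     config = RetriBertConfig(projection_dim=256)
#     config.num_hidden_layers  # 8 by default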
| 679 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_no_pytorch_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None, cache_dir=tmpdirname)
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], 'snapshots'))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_flax_tiny(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1E-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49_947.875) < 5E-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=None)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1E-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2_383_808.2)) < 5E-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1E-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2_373_516.75)) < 5E-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16)
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1E-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2_373_516.75)) < 5E-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', set_alpha_to_one=False, steps_offset=1, )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None, )
        scheduler_state = scheduler.create_state()
        params['scheduler'] = scheduler_state
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1E-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2_347_693.5)) < 5E-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None, )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True, )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1E-2
| 700 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_biogpt'] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 679 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone='resnet50',
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
    def test_hidden_states_output(self):
        pass

    @unittest.skip('TimmBackbone initialization is managed on the timm side' )
    def test_initialization(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def test_inputs_embeds(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def test_model_common_attributes(self):
        pass

    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def test_save_load(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def test_tie_model_weights(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
    def test_channels(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip('Safetensors is not supported by timm.' )
    def test_can_use_safetensors(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small(self):
        pass
def lowercase_ ( self ):
__snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] = model_class(_UpperCAmelCase )
__snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[Any] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : str = True
__snake_case : int = self.has_attentions
# no need to test all models as different heads yield the same functionality
__snake_case : Dict = self.all_model_classes[0]
__snake_case : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
__snake_case : Optional[int] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Union[str, Any] = model(**_UpperCAmelCase )
__snake_case : List[str] = outputs[0][-1]
# Encoder-/Decoder-only models
__snake_case : Tuple = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__snake_case : Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase_ ( self ):
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Dict = model(**_UpperCAmelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__snake_case : List[str] = copy.deepcopy(_UpperCAmelCase )
__snake_case : str = None
__snake_case : int = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[str] = model(**_UpperCAmelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__snake_case : int = copy.deepcopy(_UpperCAmelCase )
__snake_case : Any = False
__snake_case : Optional[int] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : str = model(**_UpperCAmelCase )
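# A hedged usage sketch of the Auto API exercised above. The checkpoint name
# and keyword arguments mirror the test code; everything else is an
# assumption, not the canonical way to use TimmBackbone:
#
#   from transformers import AutoBackbone
#
#   backbone = AutoBackbone.from_pretrained(
#       'resnet18' , use_timm_backbone=True , out_indices=[1, 2, 3] )
#   feature_maps = backbone(pixel_values ).feature_maps  # one map per out_index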
| 701 | import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class MobileViTModelTester:
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ):
__snake_case : List[str] = parent
__snake_case : Tuple = batch_size
__snake_case : str = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : List[str] = last_hidden_size
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = conv_kernel_size
__snake_case : int = output_stride
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : str = use_labels
__snake_case : Optional[Any] = is_training
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
__snake_case : Union[str, Any] = scope
def lowercase_ ( self ):
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.num_labels
__snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowercase_ ( self ):
__snake_case : Dict = MobileViTModelTester(self )
__snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[Any] = outputs.hidden_states
__snake_case : str = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
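            # Concretely, with the tester's image_size=32 and output_stride=32,
            # the five hidden states have spatial sizes 16, 8, 4, 2 and 1.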
__snake_case : Optional[Any] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
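# A hedged end-to-end sketch of the post-processing exercised above. The
# checkpoint name is taken from the tests; the rest is an assumption:
#
#   processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
#   model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
#   inputs = processor(images=prepare_img() , return_tensors='pt' )
#   with torch.no_grad():
#       outputs = model(**inputs )
#   seg_map = processor.post_process_semantic_segmentation(outputs , target_sizes=[(50, 60)] )[0]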
| 679 | 0 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid: list[list[int]] , init: list[int] , goal: list[int] , cost: int , heuristic: list[list[int]] , ):
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError('Algorithm is unable to find solution' )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path , action = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
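    # A minimal, self-contained check of the Manhattan-distance heuristic used
    # above (the 3x3 grid size here is an assumption, for illustration only):
    demo_goal = [2, 2]
    demo_heuristic = [
        [abs(i - demo_goal[0]) + abs(j - demo_goal[1]) for j in range(3)] for i in range(3)
    ]
    assert demo_heuristic[0][0] == 4 and demo_heuristic[2][2] == 0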
| 702 | def decimal_to_fraction(decimal: int | float | str ) -> tuple[int, int]:
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError('Please enter a valid number' )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split('.' )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        divisor , dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend , divisor = divisor, remainder
        numerator , denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 679 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 703 | import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    train_file: Optional[str] = field(default=None , metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"})
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__(self ):
        if self.train_file is not None:
            extension = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """simple docstring"""
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self , features ):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]['input_ids'] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels , dtype=torch.int64 )
        return batch
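# Shape flow of the collator above, for reference: `features` holds batch_size
# examples with num_choices candidate sequences each; they are flattened to
# (batch_size * num_choices, seq_len) for `tokenizer.pad`, then viewed back to
# (batch_size, num_choices, seq_len), with labels of shape (batch_size,).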
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__snake_case : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : Optional[int] = {}
if data_args.train_file is not None:
__snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : int = data_args.validation_file
__snake_case : int = data_args.train_file.split('.' )[-1]
__snake_case : Tuple = load_dataset(
__UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : str = [F"""ending{i}""" for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : Tuple = 'sent2'
if data_args.max_seq_length is None:
__snake_case : List[Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
__snake_case : List[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
__snake_case : Union[str, Any] = examples[question_header_name]
__snake_case : Optional[int] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
]
# Flatten out
__snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
__snake_case : int = list(chain(*__UpperCAmelCase ) )
# Tokenize
__snake_case : Tuple = tokenizer(
__UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
    def compute_metrics(eval_predictions ):
        predictions , label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
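# Hedged usage sketch. Flag names follow the dataclasses above and standard
# `TrainingArguments`; the exact set accepted depends on the installed
# transformers version:
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --output_dir /tmp/swag_out \
#     --per_device_train_batch_size 16 \
#     --overwrite_output_dir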
| 679 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes , num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjancency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost] )
        adjancency[nodeb].append([nodea, cost] )
    result = mst(adjancency )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
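

# A minimal reimplementation sketch of Prim's algorithm over the same
# [neighbor, cost] adjacency lists used above. This is an illustrative
# assumption, not the repo's `prisms_algorithm`, whose internals are not
# shown here:
import heapq


def prim_sketch(adjacency , start=0 ):
    visited = {start}
    frontier = [(cost, start, nxt) for nxt, cost in adjacency[start]]
    heapq.heapify(frontier )
    mst_edges = []
    while frontier:
        cost, frm, to = heapq.heappop(frontier )
        if to in visited:
            continue
        visited.add(to )
        mst_edges.append((frm, to, cost) )
        for nxt, nxt_cost in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(frontier , (nxt_cost, to, nxt) )
    return mst_edges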
| 704 | import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
MAX_MODEL_INPUT_SIZES = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
MUSTC_LANGS = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
LANGUAGES = {'''mustc''': MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def load_spm(path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm


def load_json(path: str ) -> Union[Dict, List]:
    with open(path , 'r' ) as f:
        return json.load(f )


def save_json(data , path: str ) -> None:
    with open(path , 'w' ) as f:
        json.dump(data , f , indent=2 )
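# A hedged usage sketch. The checkpoint name is taken from the vocab maps
# above; the class name assumes this file defines the Speech2Text tokenizer:
#
#   tok = Speech2TextTokenizer.from_pretrained('facebook/s2t-small-librispeech-asr' )
#   ids = tok('hello world' ).input_ids  # ends with tok.eos_token_id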
| 679 | 0 |
'''simple docstring'''
import base64


def base85_encode(string: str ) -> bytes:
    return base64.a85encode(string.encode('utf-8' ) )


def base85_decode(a85encoded: bytes ) -> str:
    return base64.a85decode(a85encoded ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 | def odd_even_transposition(arr: list ) -> list:
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i] , arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
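    # A quick sanity check (a sketch): brick sort needs at most n passes, which
    # the outer loop above provides, so any random sample must come out sorted.
    import random

    sample = [random.randint(0, 99) for _ in range(20)]
    assert odd_even_transposition(list(sample)) == sorted(sample)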
| 679 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file , repo_path="shi-labs/oneformer_demo" ):
    with open(hf_hub_download(repo_path , class_info_file , repo_type='dataset' ) , 'r' ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['name']
        class_names.append(info['name'] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata['thing_ids'] = thing_ids
    metadata['class_names'] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=10 , _UpperCAmelCase=False , _UpperCAmelCase=255 , _UpperCAmelCase="shi-labs/oneformer_demo" , _UpperCAmelCase="ade20k_panoptic.json" , _UpperCAmelCase=10 , ):
__snake_case : Dict = parent
__snake_case : str = batch_size
__snake_case : Tuple = num_channels
__snake_case : int = min_resolution
__snake_case : str = max_resolution
__snake_case : str = do_resize
__snake_case : Optional[int] = {'shortest_edge': 32, 'longest_edge': 1_333} if size is None else size
__snake_case : List[str] = do_normalize
__snake_case : Union[str, Any] = image_mean
__snake_case : List[str] = image_std
__snake_case : Any = class_info_file
__snake_case : Any = prepare_metadata(snake_case__ , snake_case__ )
__snake_case : str = num_text
__snake_case : Optional[Any] = repo_path
# for the post_process_functions
__snake_case : Any = 2
__snake_case : int = 10
__snake_case : str = 10
__snake_case : List[str] = 3
__snake_case : int = 4
__snake_case : List[Any] = num_labels
__snake_case : int = do_reduce_labels
__snake_case : Union[str, Any] = ignore_index
def lowercase_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
if not batched:
__snake_case : Union[str, Any] = image_inputs[0]
if isinstance(snake_case__ , Image.Image ):
__snake_case , __snake_case : List[Any] = image.size
else:
__snake_case , __snake_case : int = image.shape[1], image.shape[2]
if w < h:
__snake_case : Optional[Any] = int(self.size['shortest_edge'] * h / w )
__snake_case : Tuple = self.size['shortest_edge']
elif w > h:
__snake_case : Union[str, Any] = self.size['shortest_edge']
__snake_case : Dict = int(self.size['shortest_edge'] * w / h )
else:
__snake_case : Optional[int] = self.size['shortest_edge']
__snake_case : int = self.size['shortest_edge']
else:
__snake_case : int = []
for image in image_inputs:
__snake_case , __snake_case : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__snake_case : Union[str, Any] = max(snake_case__ , key=lambda _UpperCAmelCase : item[0] )[0]
__snake_case : str = max(snake_case__ , key=lambda _UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
def lowercase_ ( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase):
    """simple docstring"""
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
def lowercase_ ( self ):
__snake_case : List[str] = OneFormerImageProcessorTester(self )
@property
def lowercase_ ( self ):
return self.image_processing_tester.prepare_image_processor_dict()
def lowercase_ ( self ):
__snake_case : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , 'image_mean' ) )
self.assertTrue(hasattr(snake_case__ , 'image_std' ) )
self.assertTrue(hasattr(snake_case__ , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case__ , 'do_resize' ) )
self.assertTrue(hasattr(snake_case__ , 'size' ) )
self.assertTrue(hasattr(snake_case__ , 'ignore_index' ) )
self.assertTrue(hasattr(snake_case__ , 'class_info_file' ) )
self.assertTrue(hasattr(snake_case__ , 'num_text' ) )
self.assertTrue(hasattr(snake_case__ , 'repo_path' ) )
self.assertTrue(hasattr(snake_case__ , 'metadata' ) )
self.assertTrue(hasattr(snake_case__ , 'do_reduce_labels' ) )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# Initialize image_processor
__snake_case : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
__snake_case : List[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
__snake_case , __snake_case : str = self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__snake_case , __snake_case : Optional[int] = self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
__snake_case : int = image_processor(
snake_case__ , ['semantic'] * len(snake_case__ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self ):
# Initialize image_processor
__snake_case : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
__snake_case : str = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
__snake_case , __snake_case : List[str] = self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__snake_case , __snake_case : Optional[int] = self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
__snake_case : Dict = image_processor(
snake_case__ , ['semantic'] * len(snake_case__ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self ):
# Initialize image_processor
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Any = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
__snake_case : List[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
__snake_case , __snake_case : int = self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__snake_case , __snake_case : Dict = self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
__snake_case : Tuple = image_processor(
snake_case__ , ['semantic'] * len(snake_case__ ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs(self , with_segmentation_maps=False , is_instance_map=False , segmentation_type="np" ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels ) ) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded ) )
            annotations = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation ) for annotation in annotations]
        inputs = image_processor(
            image_inputs , ['semantic'] * len(image_inputs ) , annotations , return_tensors='pt' , instance_id_to_semantic_id=instance_id_to_semantic_id , pad_and_return_pixel_mask=True , )
        return inputs
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
def common(_UpperCAmelCase=False , _UpperCAmelCase=None ):
__snake_case : Optional[Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case__ , is_instance_map=snake_case__ , segmentation_type=snake_case__ )
__snake_case : int = inputs['mask_labels']
__snake_case : Optional[int] = inputs['class_labels']
__snake_case : str = inputs['pixel_values']
__snake_case : Union[str, Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case__ , snake_case__ , snake_case__ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case__ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case__ )
common(is_instance_map=snake_case__ , segmentation_type='pil' )
common(is_instance_map=snake_case__ , segmentation_type='pil' )
def lowercase_ ( self ):
__snake_case : int = np.zeros((20, 50) )
__snake_case : Optional[int] = 1
__snake_case : Dict = 1
__snake_case : Tuple = 1
__snake_case : Any = binary_mask_to_rle(snake_case__ )
self.assertEqual(len(snake_case__ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
__snake_case : List[str] = self.image_processing_tester.get_fake_oneformer_outputs()
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(snake_case__ )
self.assertEqual(len(snake_case__ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__snake_case : Dict = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
__snake_case : Dict = image_processor.post_process_semantic_segmentation(snake_case__ , target_sizes=snake_case__ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowercase_ ( self ):
__snake_case : List[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
__snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
__snake_case : str = image_processor.post_process_instance_segmentation(snake_case__ , threshold=0 )
self.assertTrue(len(snake_case__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , snake_case__ )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowercase_ ( self ):
__snake_case : List[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
__snake_case : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
__snake_case : List[str] = image_processor.post_process_panoptic_segmentation(snake_case__ , threshold=0 )
self.assertTrue(len(snake_case__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , snake_case__ )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
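# A minimal sketch of the COCO-style run-length encoding that the
# binary_mask_to_rle test above exercises. The alternating
# [start, length, start, length, ...] layout with 1-based starts is inferred
# from the assertions (rle[0] == 21, rle[1] == 45); the helper name below is
# hypothetical, not the transformers implementation.
import numpy as np

def binary_mask_to_rle_sketch(mask):
    # Pad the row-major flattened mask with zeros so every run of ones
    # has a well-defined start and end.
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    # Positions where the value changes mark run boundaries (1-based).
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    # Turn [start, end, start, end, ...] into [start, length, ...].
    runs[1::2] -= runs[::2]
    return list(runs)

mask = np.zeros((20, 50))
mask[0, 20:] = 1  # 30 ones at the end of row 0 ...
mask[1, :15] = 1  # ... continuing into row 1: a single run of length 45
assert binary_mask_to_rle_sketch(mask)[:2] == [21, 45]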
| 706 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
__snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
# one-character strings and special added tokens as tokens
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
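# A byte-level sketch of the id scheme the Perceiver tokenizer tests above
# assert: ids 0-5 appear reserved for special tokens ([CLS] = 4, [SEP] = 5)
# and each UTF-8 byte b maps to id b + 6. The offset of 6 is deduced from the
# expected ids in the tests, not taken from the tokenizer source.
def perceiver_encode_sketch(text):
    return [4] + [b + 6 for b in text.encode("utf-8")] + [5]

# Reproduces the expected ids for "Unicode €.", including the three bytes
# of the multi-byte "€" (0xE2 0x82 0xAC -> 232, 136, 178 after the offset).
assert perceiver_encode_sketch("Unicode €.") == [
    4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5
]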
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __lowerCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , a_ , )
super().__init__(*a_ , **a_ )
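# Usage sketch for the deprecation shim above: it emits a warning (a
# FutureWarning in the upstream source; the category was obfuscated to `a_`)
# and then defers entirely to ChineseCLIPImageProcessor, so existing call
# sites keep working unchanged.
#
#   extractor = ChineseCLIPFeatureExtractor(do_resize=True)  # warns once
#   # ... from here on it behaves exactly like ChineseCLIPImageProcessor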
| 707 | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
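# Usage sketch for the variable-language feature above (method names were
# collapsed to `lowercase_` by the obfuscation; the behaviour is read from
# the encode method's body). Multiple texts for one language become separate
# (language, text) pairs, sorted by language code and then by text:
#
#   feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#   feature.lowercase_({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
#   # -> {"language": ("de", "en", "fr", "fr"),
#   #     "translation": ("die katze", "the cat", "la chatte", "le chat")}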
| 679 | 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
__magic_name__ = True
except ImportError:
__magic_name__ = False
try:
from torch.hub import _get_torch_home
__magic_name__ = _get_torch_home()
except ImportError:
__magic_name__ = os.path.expanduser(
os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch'''))
)
__magic_name__ = os.path.join(torch_cache_home, '''transformers''')
__magic_name__ = '''https://cdn.huggingface.co'''
__magic_name__ = '''https://s3.amazonaws.com/models.huggingface.co/bert'''
__magic_name__ = '''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1])
__magic_name__ = os.path.join(PATH, '''config.yaml''')
__magic_name__ = os.path.join(PATH, '''attributes.txt''')
__magic_name__ = os.path.join(PATH, '''objects.txt''')
__magic_name__ = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path)
__magic_name__ = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE)
__magic_name__ = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE)
__magic_name__ = '''pytorch_model.bin'''
__magic_name__ = '''config.yaml'''
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OBJECTS , __UpperCAmelCase : int=ATTRIBUTES ):
__snake_case : str = []
with open(_snake_case ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
__snake_case : Optional[Any] = []
with open(_snake_case ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
__snake_case : str = OrderedDict()
with open(_snake_case , 'rb' ) as f:
__snake_case : int = pkl.load(_snake_case )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
__snake_case : int = ckp.pop(_snake_case )
if isinstance(_snake_case , np.ndarray ):
__snake_case : Optional[Any] = torch.tensor(_snake_case )
else:
assert isinstance(_snake_case , torch.Tensor ), type(_snake_case )
__snake_case : Tuple = v
return r
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = {}
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "root" , _UpperCAmelCase=0 ):
__snake_case : List[Any] = name
__snake_case : Union[str, Any] = level
__snake_case : Dict = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__snake_case : List[str] = copy.deepcopy(_UpperCAmelCase )
__snake_case : List[str] = copy.deepcopy(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = Config(_UpperCAmelCase , name=_UpperCAmelCase , level=level + 1 )
__snake_case : Optional[Any] = v
setattr(self , _UpperCAmelCase , _UpperCAmelCase )
__snake_case : Dict = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Dict = val
__snake_case : Dict = val
__snake_case : str = key.split('.' )
__snake_case : Any = len(_UpperCAmelCase ) - 1
__snake_case : int = self._pointer
if len(_UpperCAmelCase ) > 1:
for i, l in enumerate(_UpperCAmelCase ):
if hasattr(self , _UpperCAmelCase ) and isinstance(getattr(self , _UpperCAmelCase ) , _UpperCAmelCase ):
setattr(getattr(self , _UpperCAmelCase ) , '.'.join(levels[i:] ) , _UpperCAmelCase )
if l == last_level:
__snake_case : Union[str, Any] = val
else:
__snake_case : str = pointer[l]
def lowercase_ ( self ):
return self._pointer
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
with open(F"""{file_name}""" , 'w' ) as stream:
dump(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
with open(F"""{file_name}""" , 'w' ) as stream:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
@staticmethod
def lowercase_ ( _UpperCAmelCase ):
with open(_UpperCAmelCase ) as stream:
__snake_case : Union[str, Any] = load(_UpperCAmelCase , Loader=_UpperCAmelCase )
return data
def __str__( self ):
__snake_case : Any = ' '
if self._name != "root":
__snake_case : Optional[Any] = F"""{t * (self._level-1)}{self._name}:\n"""
else:
__snake_case : str = ''
__snake_case : str = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
r += F"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += F"""{t * (self._level)}{k}: {v} ({type(_UpperCAmelCase ).__name__})\n"""
__snake_case : List[str] = level
return r[:-1]
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
__snake_case , __snake_case : List[str] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
return cls(_UpperCAmelCase )
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
__snake_case : Dict = kwargs.pop('cache_dir' , _UpperCAmelCase )
__snake_case : Optional[Any] = kwargs.pop('force_download' , _UpperCAmelCase )
__snake_case : Tuple = kwargs.pop('resume_download' , _UpperCAmelCase )
__snake_case : Union[str, Any] = kwargs.pop('proxies' , _UpperCAmelCase )
__snake_case : Optional[int] = kwargs.pop('local_files_only' , _UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
__snake_case : List[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
elif os.path.isfile(_UpperCAmelCase ) or is_remote_url(_UpperCAmelCase ):
__snake_case : Dict = pretrained_model_name_or_path
else:
__snake_case : int = hf_bucket_url(_UpperCAmelCase , filename=_UpperCAmelCase , use_cdn=_UpperCAmelCase )
try:
# Load from URL or cache if already cached
__snake_case : int = cached_path(
_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , local_files_only=_UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__snake_case : List[Any] = Config.load_yaml(_UpperCAmelCase )
except EnvironmentError:
__snake_case : int = 'Can\'t load config for'
raise EnvironmentError(_UpperCAmelCase )
if resolved_config_file == config_file:
print('loading configuration file from path' )
else:
print('loading configuration file cache' )
return Config.load_yaml(_UpperCAmelCase ), kwargs
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
__snake_case : Dict = torch.load('dump.pt' , map_location=in_tensor.device )
__snake_case : Optional[int] = in_tensor.numpy()
__snake_case : Union[str, Any] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(_snake_case , _snake_case , rtol=0.01 , atol=0.1 ), (
F"""{sum([1 for x in np.isclose(_snake_case , _snake_case , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %"""
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
__snake_case : Union[str, Any] = urlparse(_snake_case )
return parsed.scheme in ("http", "https")
def UpperCAmelCase__( __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]=True ):
__snake_case : Tuple = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__snake_case : Tuple = '/' not in model_id
if legacy_format:
return F"""{endpoint}/{model_id}-{filename}"""
else:
return F"""{endpoint}/{model_id}/{filename}"""
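# Worked examples for hf_bucket_url above, read directly from its branches:
# a model id without "/" takes the legacy dash-joined form, while a
# namespaced id nests the filename under the model id (URLs illustrative):
#
#   hf_bucket_url("bert-base-uncased", "pytorch_model.bin", use_cdn=False)
#   # -> "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin"
#   hf_bucket_url("org/model", "pytorch_model.bin", use_cdn=True)
#   # -> "https://cdn.huggingface.co/org/model/pytorch_model.bin"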
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : int=0 , __UpperCAmelCase : Tuple=None , ):
__snake_case : int = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(_snake_case , _snake_case ):
ua += "; " + "; ".join('{}/{}'.format(_snake_case , _snake_case ) for k, v in user_agent.items() )
elif isinstance(_snake_case , _snake_case ):
ua += "; " + user_agent
__snake_case : int = {'user-agent': ua}
if resume_size > 0:
__snake_case : List[Any] = 'bytes=%d-' % (resume_size,)
__snake_case : int = requests.get(_snake_case , stream=_snake_case , proxies=_snake_case , headers=_snake_case )
if response.status_code == 4_16: # Range not satisfiable
return
__snake_case : Dict = response.headers.get('Content-Length' )
__snake_case : List[Any] = resume_size + int(_snake_case ) if content_length is not None else None
__snake_case : int = tqdm(
unit='B' , unit_scale=_snake_case , total=_snake_case , initial=_snake_case , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(_snake_case ) )
temp_file.write(_snake_case )
progress.close()
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str=None , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=10 , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Dict=False , ):
if cache_dir is None:
__snake_case : Tuple = TRANSFORMERS_CACHE
if isinstance(_snake_case , _snake_case ):
__snake_case : Tuple = str(_snake_case )
os.makedirs(_snake_case , exist_ok=_snake_case )
__snake_case : Tuple = None
if not local_files_only:
try:
__snake_case : List[Any] = requests.head(_snake_case , allow_redirects=_snake_case , proxies=_snake_case , timeout=_snake_case )
if response.status_code == 2_00:
__snake_case : List[Any] = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__snake_case : str = url_to_filename(_snake_case , _snake_case )
# get cache path to put the file
__snake_case : Union[str, Any] = os.path.join(_snake_case , _snake_case )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_snake_case ):
return cache_path
else:
__snake_case : Any = [
file
for file in fnmatch.filter(os.listdir(_snake_case ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(_snake_case ) > 0:
return os.path.join(_snake_case , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(_snake_case ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__snake_case : Tuple = cache_path + '.lock'
with FileLock(_snake_case ):
# If the download just completed while the lock was activated.
if os.path.exists(_snake_case ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__snake_case : Tuple = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(_snake_case , 'a+b' ) as f:
yield f
__snake_case : Optional[Any] = _resumable_file_manager
if os.path.exists(_snake_case ):
__snake_case : Tuple = os.stat(_snake_case ).st_size
else:
__snake_case : Optional[Any] = 0
else:
__snake_case : Any = partial(tempfile.NamedTemporaryFile , dir=_snake_case , delete=_snake_case )
__snake_case : int = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'%s not found in cache or force_download set to True, downloading to %s' , _snake_case , temp_file.name , )
http_get(
_snake_case , _snake_case , proxies=_snake_case , resume_size=_snake_case , user_agent=_snake_case , )
os.replace(temp_file.name , _snake_case )
__snake_case : Any = {'url': url, 'etag': etag}
__snake_case : Optional[Any] = cache_path + '.json'
with open(_snake_case , 'w' ) as meta_file:
json.dump(_snake_case , _snake_case )
return cache_path
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None ):
__snake_case : Dict = url.encode('utf-8' )
__snake_case : Dict = sha256(_snake_case )
__snake_case : List[Any] = url_hash.hexdigest()
if etag:
__snake_case : List[Any] = etag.encode('utf-8' )
__snake_case : Union[str, Any] = sha256(_snake_case )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
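# The cache-filename scheme above, restated: the base name is the SHA-256
# hex digest of the URL; when an ETag is known a second digest is appended,
# so a new upstream revision gets a fresh cache entry. The call below is
# illustrative (digests not precomputed):
#
#   url_to_filename("https://example.com/weights.h5", etag='"abc123"')
#   # -> "<sha256(url)>.<sha256(etag)>.h5"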
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : str=None , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Dict=None , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False , ):
if cache_dir is None:
__snake_case : Optional[int] = TRANSFORMERS_CACHE
if isinstance(_snake_case , _snake_case ):
__snake_case : Dict = str(_snake_case )
if isinstance(_snake_case , _snake_case ):
__snake_case : Any = str(_snake_case )
if is_remote_url(_snake_case ):
# URL, so get it from the cache (downloading if necessary)
__snake_case : Tuple = get_from_cache(
_snake_case , cache_dir=_snake_case , force_download=_snake_case , proxies=_snake_case , resume_download=_snake_case , user_agent=_snake_case , local_files_only=_snake_case , )
elif os.path.exists(_snake_case ):
# File, and it exists.
__snake_case : List[str] = url_or_filename
elif urlparse(_snake_case ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(_snake_case ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(_snake_case ) )
if extract_compressed_file:
if not is_zipfile(_snake_case ) and not tarfile.is_tarfile(_snake_case ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__snake_case , __snake_case : Optional[int] = os.path.split(_snake_case )
__snake_case : List[str] = output_file.replace('.' , '-' ) + '-extracted'
__snake_case : int = os.path.join(_snake_case , _snake_case )
if os.path.isdir(_snake_case ) and os.listdir(_snake_case ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__snake_case : Optional[Any] = output_path + '.lock'
with FileLock(_snake_case ):
shutil.rmtree(_snake_case , ignore_errors=_snake_case )
os.makedirs(_snake_case )
if is_zipfile(_snake_case ):
with ZipFile(_snake_case , 'r' ) as zip_file:
zip_file.extractall(_snake_case )
zip_file.close()
elif tarfile.is_tarfile(_snake_case ):
__snake_case : Any = tarfile.open(_snake_case )
tar_file.extractall(_snake_case )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(_snake_case ) )
return output_path_extracted
return output_path
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str="," ):
assert isinstance(_snake_case , _snake_case )
if os.path.isfile(_snake_case ):
with open(_snake_case ) as f:
__snake_case : Dict = eval(f.read() )
else:
__snake_case : Dict = requests.get(_snake_case )
try:
__snake_case : Tuple = req.json()
except Exception:
__snake_case : Optional[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__snake_case : Optional[Any] = eval(_snake_case )
except Exception:
__snake_case : Optional[Any] = data.split('\n' )
req.close()
return data
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
__snake_case : int = requests.get(_snake_case )
__snake_case : List[Any] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
__snake_case : List[str] = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_snake_case )
with open(_snake_case , 'rb' ) as stream:
__snake_case : List[str] = pkl.load(_snake_case )
__snake_case : Any = weights.pop('model' )
__snake_case : Optional[int] = {}
for k, v in model.items():
__snake_case : Dict = torch.from_numpy(_snake_case )
if "running_var" in k:
__snake_case : List[Any] = torch.tensor([0] )
__snake_case : Any = k.replace('running_var' , 'num_batches_tracked' )
__snake_case : str = zero
return new
def UpperCAmelCase__( ):
print(F"""{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb""" )
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict="RGB" ):
assert isinstance(_snake_case , _snake_case )
if os.path.isfile(_snake_case ):
__snake_case : str = cv2.imread(_snake_case )
else:
__snake_case : Dict = get_image_from_url(_snake_case )
assert img is not None, F"""could not connect to: {im}"""
__snake_case : Union[str, Any] = cv2.cvtColor(_snake_case , cv2.COLOR_BGR2RGB )
if input_format == "RGB":
__snake_case : Dict = img[:, :, ::-1]
return img
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str]=1 ):
return (images[i : i + batch] for i in range(0 , len(_snake_case ) , _snake_case ))
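# The final helper above yields fixed-size slices of a sequence; a standalone
# restatement with a hypothetical name, since the obfuscation collapsed all
# of the module's function names into one:
def chunk_batches(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))

assert list(chunk_batches([1, 2, 3, 4, 5], batch=2)) == [[1, 2], [3, 4], [5]]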
| 708 | from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
__snake_case : Union[str, Any] = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
__snake_case : str = False # flag set if we can't find expand
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 679 | 0 |
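# The A* demo above builds a Manhattan-distance heuristic,
# |i - goal[0]| + |j - goal[1]|, which never overestimates the remaining cost
# on a 4-connected grid with unit step cost, so the search returns a shortest
# path. A minimal standalone restatement:
def manhattan(cell, goal):
    # Admissible heuristic for 4-connected grids with unit moves.
    return abs(cell[0] - goal[0]) + abs(cell[1] - goal[1])

assert manhattan((0, 0), (4, 5)) == 9  # heuristic[0][0] for the 5x6 demo grid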
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=64 , _UpperCAmelCase=None ):
__snake_case : Dict = np.random.default_rng(_SCREAMING_SNAKE_CASE )
__snake_case : List[Any] = length
__snake_case : Any = rng.normal(size=(length,) ).astype(np.float32 )
__snake_case : Any = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
def __len__( self ):
return self.length
def __getitem__( self , _UpperCAmelCase ):
return {"x": self.x[i], "y": self.y[i]}
class __SCREAMING_SNAKE_CASE ( torch.nn.Module):
"""simple docstring"""
def __init__( self , _UpperCAmelCase=0 , _UpperCAmelCase=0 , _UpperCAmelCase=False ):
super().__init__()
__snake_case : int = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
__snake_case : Dict = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
__snake_case : Any = True
def lowercase_ ( self , _UpperCAmelCase=None ):
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
__snake_case : List[str] = False
return x * self.a[0] + self.b[0]
class __SCREAMING_SNAKE_CASE ( torch.nn.Module):
"""simple docstring"""
def __init__( self , _UpperCAmelCase=0 , _UpperCAmelCase=0 , _UpperCAmelCase=False ):
super().__init__()
__snake_case : str = torch.nn.Parameter(torch.tensor(_SCREAMING_SNAKE_CASE ).float() )
__snake_case : Union[str, Any] = torch.nn.Parameter(torch.tensor(_SCREAMING_SNAKE_CASE ).float() )
__snake_case : Tuple = True
def lowercase_ ( self , _UpperCAmelCase=None ):
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
__snake_case : Any = False
return x * self.a + self.b
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : int = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
__snake_case : Any = AutoTokenizer.from_pretrained('bert-base-cased' )
__snake_case : int = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
__snake_case : Tuple = load_dataset('csv' , data_files=_A )
__snake_case : Dict = datasets['train'].unique('label' )
__snake_case : Optional[int] = {v: i for i, v in enumerate(_A )}
def tokenize_function(__UpperCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
__snake_case : List[Any] = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=_A , max_length=_A , padding='max_length' )
if "label" in examples:
__snake_case : str = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__snake_case : Dict = datasets.map(
_A , batched=_A , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(__UpperCAmelCase : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_A , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(_A , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__snake_case : List[Any] = DataLoader(tokenized_datasets['train'] , shuffle=_A , collate_fn=_A , batch_size=2 )
__snake_case : Optional[int] = DataLoader(tokenized_datasets['validation'] , shuffle=_A , collate_fn=_A , batch_size=1 )
return train_dataloader, eval_dataloader
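# Usage sketch for the regression fixtures above. The names
# RegressionDataset and RegressionModel are assumptions for the two
# obfuscated classes (they match accelerate's test fixtures); the loop is
# illustrative only.
#
#   dataset = RegressionDataset(length=96)
#   loader = DataLoader(dataset, batch_size=16)
#   model = RegressionModel(a=2, b=3)
#   for batch in loader:
#       pred = model(batch["x"])          # prints dtypes on the first batch
#       loss = ((pred - batch["y"]) ** 2).mean()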
| 709 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
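# Composition sketch for the configs above. InstructBlipConfig is the assumed
# upstream name of the composite class (its model_type is "instructblip");
# the sub-config class names appear verbatim in the __init__ body, and "opt"
# is the default text model type noted in the log message:
#
#   vision = InstructBlipVisionConfig()
#   qformer = InstructBlipQFormerConfig()
#   config = InstructBlipConfig(
#       vision_config=vision.to_dict(),
#       qformer_config=qformer.to_dict(),
#       text_config={"model_type": "opt"},
#   )
#   assert config.num_query_tokens == 32  # constructor default above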
| 679 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = 'pt'
elif is_tf_available():
__magic_name__ = 'tf'
else:
__magic_name__ = 'jax'
class __SCREAMING_SNAKE_CASE ( _A , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : Optional[Any] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
__snake_case : Union[str, Any] = []
for i in range(len(UpperCamelCase__ ) ):
try:
__snake_case : Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : Optional[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase__ ) )
__snake_case : List[str] = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
__snake_case : Optional[int] = toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
__snake_case : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : List[Any] = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
__snake_case : str = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
__snake_case : Any = ' ' + output_txt
__snake_case : str = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : str = 'Unicode €.'
__snake_case : List[Any] = tokenizer(UpperCamelCase__ )
__snake_case : int = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , UpperCamelCase__ )
# decoding
__snake_case : Union[str, Any] = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '[CLS]Unicode €.[SEP]' )
__snake_case : str = tokenizer('e è é ê ë' )
__snake_case : Any = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , UpperCamelCase__ )
# decoding
__snake_case : str = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.perceiver_tokenizer
__snake_case : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : Tuple = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : str = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
if FRAMEWORK != "jax":
__snake_case : Union[str, Any] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : Any = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase__ )
self.assertIn('attention_mask' , UpperCamelCase__ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase__ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase__ )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : Tuple = tokenizer(
text_target=UpperCamelCase__ , max_length=32 , padding='max_length' , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
__snake_case : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : List[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : str = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
__snake_case : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
__snake_case : Union[str, Any] = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
__snake_case : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Optional[int] = tempfile.mkdtemp()
__snake_case : List[str] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : str = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : str = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
__snake_case : str = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
__snake_case : Dict = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : int = tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase__ )
def lowercase_ ( self ):
__snake_case : Optional[int] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : str = json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Union[str, Any] = json.load(UpperCamelCase__ )
__snake_case : Optional[int] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : Optional[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Dict = tokenizer_class.from_pretrained(
UpperCamelCase__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase__ )]
__snake_case : List[Any] = tokenizer_class.from_pretrained(
UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : int = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Optional[int] = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
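# A minimal sketch (not the Transformers implementation) of what
# convert_tokens_to_string can look like for a character-level tokenizer such as
# the one tested above: special tokens are dropped, the rest are concatenated.
def convert_tokens_to_string_sketch(tokens, special_tokens=("[CLS]", "[SEP]")):
    return "".join(tok for tok in tokens if tok not in special_tokens)


assert convert_tokens_to_string_sketch(
    ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
) == "this is a test"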
| 710 | import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , FutureWarning , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
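# The shim above is the standard deprecation pattern: subclass the replacement and
# emit a FutureWarning on construction. Self-contained sketch of the same idea
# (class names here are illustrative, not part of transformers):
class _NewProcessor:
    def __init__(self, size=224):
        self.size = size


class _OldFeatureExtractor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "_OldFeatureExtractor is deprecated; use _NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)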
| 679 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( snake_case__):
"""simple docstring"""
__UpperCAmelCase = '''conditional_detr'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=3 , _UpperCAmelCase=300 , _UpperCAmelCase=6 , _UpperCAmelCase=2_048 , _UpperCAmelCase=8 , _UpperCAmelCase=6 , _UpperCAmelCase=2_048 , _UpperCAmelCase=8 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1.0 , _UpperCAmelCase=False , _UpperCAmelCase="sine" , _UpperCAmelCase="resnet50" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=0.25 , **_UpperCAmelCase , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
__snake_case : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_A , _A ):
__snake_case : Tuple = backbone_config.get('model_type' )
__snake_case : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
__snake_case : List[Any] = config_class.from_dict(_A )
__snake_case : Tuple = use_timm_backbone
__snake_case : List[str] = backbone_config
__snake_case : Dict = num_channels
__snake_case : int = num_queries
__snake_case : int = d_model
__snake_case : str = encoder_ffn_dim
__snake_case : List[str] = encoder_layers
__snake_case : Optional[Any] = encoder_attention_heads
__snake_case : Union[str, Any] = decoder_ffn_dim
__snake_case : List[Any] = decoder_layers
__snake_case : Optional[Any] = decoder_attention_heads
__snake_case : Any = dropout
__snake_case : Any = attention_dropout
__snake_case : int = activation_dropout
__snake_case : Optional[int] = activation_function
__snake_case : Union[str, Any] = init_std
__snake_case : Union[str, Any] = init_xavier_std
__snake_case : Optional[Any] = encoder_layerdrop
__snake_case : int = decoder_layerdrop
__snake_case : List[str] = encoder_layers
__snake_case : str = auxiliary_loss
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = backbone
__snake_case : List[str] = use_pretrained_backbone
__snake_case : List[Any] = dilation
# Hungarian matcher
__snake_case : List[str] = class_cost
__snake_case : Optional[int] = bbox_cost
__snake_case : Dict = giou_cost
# Loss coefficients
__snake_case : Optional[int] = mask_loss_coefficient
__snake_case : Union[str, Any] = dice_loss_coefficient
__snake_case : List[Any] = cls_loss_coefficient
__snake_case : Dict = bbox_loss_coefficient
__snake_case : Tuple = giou_loss_coefficient
__snake_case : Tuple = focal_alpha
super().__init__(is_encoder_decoder=_A , **_A )
@property
def lowercase_ ( self ):
return self.encoder_attention_heads
@property
def lowercase_ ( self ):
return self.d_model
def lowercase_ ( self ):
__snake_case : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__snake_case : Dict = self.backbone_config.to_dict()
__snake_case : Union[str, Any] = self.__class__.model_type
return output
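# The attribute_map declared above makes config.hidden_size a transparent alias of
# config.d_model. Self-contained sketch of that aliasing mechanism (illustrative
# only, not the transformers implementation):
class _AliasedConfig:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model=256):
        self.d_model = d_model

    def __getattr__(self, name):
        # Only called when normal lookup fails, so d_model itself resolves directly.
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


assert _AliasedConfig().hidden_size == 256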
class __SCREAMING_SNAKE_CASE ( snake_case__):
"""simple docstring"""
__UpperCAmelCase = version.parse("1.11")
@property
def lowercase_ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def lowercase_ ( self ):
return 1E-5
@property
def lowercase_ ( self ):
return 12 | 711 | import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    # Read the file as bytes and return them as one long bit string.
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    # Replace curr_string with its two one-bit extensions; widen every code by one
    # leading "0" whenever the next index needs an extra bit (index is a power of 2).
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    # LZ-style compression of a bit string: emit the code of the longest known
    # prefix and grow the lexicon as new prefixes are seen.
    lexicon = {"0": "0", "1": "1"}
    curr_string, result = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    # Prepend the source file's length using Elias gamma coding.
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    # Pack the 0/1 string into bytes, terminating with a "1" followed by "0" padding.
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    # Read the source file, compress it and write the result to the destination.
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
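# Quick in-memory sanity check for compress_data (no file I/O). On short inputs the
# output can be longer than the input; the lexicon needs to grow before the scheme
# pays off. Call it manually, e.g. from a REPL:
def demo_compress_in_memory(sample: str = "0110" * 32) -> None:
    packed = compress_data(sample)
    print(f"input bits: {len(sample)}, output bits: {len(packed)}")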
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 679 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ReformerTokenizer
__UpperCAmelCase = ReformerTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
def lowercase_ ( self ):
super().setUp()
__snake_case : Union[str, Any] = ReformerTokenizer(__a , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self ):
__snake_case : List[str] = '<s>'
__snake_case : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def lowercase_ ( self ):
__snake_case : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(__a ) , 1_000 )
def lowercase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def lowercase_ ( self ):
if not self.test_rust_tokenizer:
return
__snake_case : Dict = self.get_tokenizer()
__snake_case : int = self.get_rust_tokenizer()
__snake_case : Union[str, Any] = 'I was born in 92000, and this is falsé.'
__snake_case : Optional[int] = tokenizer.tokenize(__a )
__snake_case : List[str] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
__snake_case : Optional[int] = tokenizer.encode(__a , add_special_tokens=__a )
__snake_case : Union[str, Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
__snake_case : Tuple = self.get_rust_tokenizer()
__snake_case : Optional[Any] = tokenizer.encode(__a )
__snake_case : Union[str, Any] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def lowercase_ ( self , _UpperCAmelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__snake_case : int = self.rust_tokenizer_class.from_pretrained(__a , **__a )
# Simple input
__snake_case : Union[str, Any] = 'This is a simple input'
__snake_case : Tuple = ['This is a simple input 1', 'This is a simple input 2']
__snake_case : Optional[int] = ('This is a simple input', 'This is a pair')
__snake_case : Optional[Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Simple input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
# Pair input
self.assertRaises(__a , tokenizer_r.encode , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(__a , tokenizer_r.encode_plus , __a , max_length=__a , padding='max_length' )
# Pair input
self.assertRaises(
__a , tokenizer_r.batch_encode_plus , __a , max_length=__a , padding='max_length' , )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : List[Any] = ReformerTokenizer(__a , keep_accents=__a )
__snake_case : List[str] = tokenizer.tokenize('This is a test' )
self.assertListEqual(__a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [285, 46, 10, 170, 382] , )
__snake_case : Optional[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__snake_case : Tuple = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__snake_case : List[str] = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def lowercase_ ( self ):
return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' )
@slow
def lowercase_ ( self ):
__snake_case : List[str] = 'Hello World!'
__snake_case : List[str] = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@slow
def lowercase_ ( self ):
__snake_case : Optional[int] = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
__snake_case : List[Any] = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(__a , self.big_tokenizer.encode(__a ) )
@require_torch
@slow
def lowercase_ ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
__snake_case : List[str] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__snake_case : str = ' '.join(__a )
__snake_case : List[str] = self.big_tokenizer.encode_plus(__a , return_tensors='pt' )
__snake_case : Optional[int] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='pt' )
__snake_case : List[Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
__snake_case : List[str] = encoded_sequence['input_ids'].shape
__snake_case : List[Any] = ReformerModel(__a )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a )
model(**__a )
@slow
def lowercase_ ( self ):
# fmt: off
__snake_case : int = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
__snake_case : int = [
'This is a very simple sentence.',
'The quick brown fox jumps over the lazy dog.',
]
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='google/reformer-crime-and-punishment' , revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a' , padding=__a , sequences=__a , )
| 712 | from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    # d2d3d4 divisible by 2 requires d4 (num[3]) to be even
    if num[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3 requires its digit sum to be divisible by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5 requires d6 (num[5]) to be 0 or 5
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    # Sum of all 0-9 pandigital numbers with the substring divisibility property
    # (Project Euler problem 43).
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
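# Cross-check against the example from the problem statement: 1406357289 is the
# canonical substring-divisible 0-9 pandigital, so the predicate must accept it.
assert is_substring_divisible(tuple(int(c) for c in "1406357289"))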
if __name__ == "__main__":
print(F'''{solution() = }''')
| 679 | 0 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : List[Any]="pt" ):
__snake_case : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(' ' ) else {}
__snake_case : Optional[Any] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase_ , padding='max_length' if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , )
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=None , ):
__snake_case : int = input_ids.ne(lowerCamelCase_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __SCREAMING_SNAKE_CASE ( lowercase_):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="train" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="" , ):
super().__init__()
__snake_case : str = Path(lowerCamelCase_ ).joinpath(type_path + '.source' )
__snake_case : Optional[Any] = Path(lowerCamelCase_ ).joinpath(type_path + '.target' )
__snake_case : Union[str, Any] = self.get_char_lens(self.src_file )
__snake_case : int = max_source_length
__snake_case : str = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
__snake_case : List[str] = tokenizer
__snake_case : Dict = prefix
if n_obs is not None:
__snake_case : List[Any] = self.src_lens[:n_obs]
__snake_case : int = src_lang
__snake_case : Optional[int] = tgt_lang
def __len__( self ):
return len(self.src_lens )
def __getitem__( self , _UpperCAmelCase ):
__snake_case : Any = index + 1 # linecache starts at 1
__snake_case : Dict = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase_ ).rstrip('\n' )
__snake_case : Dict = linecache.getline(str(self.tgt_file ) , lowerCamelCase_ ).rstrip('\n' )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCamelCase_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__snake_case : Union[str, Any] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer
)
__snake_case : Any = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer
__snake_case : Optional[int] = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_source_length , 'right' )
__snake_case : Union[str, Any] = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_target_length , 'right' )
__snake_case : Tuple = source_inputs["""input_ids"""].squeeze()
__snake_case : Tuple = target_inputs["""input_ids"""].squeeze()
__snake_case : List[str] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowercase_ ( _UpperCAmelCase ):
return [len(lowerCamelCase_ ) for x in Path(lowerCamelCase_ ).open().readlines()]
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = torch.stack([x['input_ids'] for x in batch] )
__snake_case : int = torch.stack([x['attention_mask'] for x in batch] )
__snake_case : int = torch.stack([x['decoder_input_ids'] for x in batch] )
__snake_case : int = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase_ )
else self.tokenizer.pad_token_id
)
__snake_case : Dict = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCamelCase_ )
else self.tokenizer.pad_token_id
)
__snake_case : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ )
__snake_case : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ )
__snake_case : int = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
return list(itertools.chain.from_iterable(lowerCamelCase_ ) )
def UpperCAmelCase__( __UpperCAmelCase : Any ):
__snake_case : str = get_git_info()
save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , 'git_log.json' ) )
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any]=4 , **__UpperCAmelCase : Optional[int] ):
with open(lowerCamelCase_ , 'w' ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase__( __UpperCAmelCase : Any ):
with open(lowerCamelCase_ ) as f:
return json.load(lowerCamelCase_ )
def UpperCAmelCase__( ):
__snake_case : List[Any] = git.Repo(search_parent_directories=lowerCamelCase_ )
__snake_case : Tuple = {
"""repo_id""": str(lowerCamelCase_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict ):
return list(map(lowerCamelCase_ , lowerCamelCase_ ) )
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ):
with open(lowerCamelCase_ , 'wb' ) as f:
return pickle.dump(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase__( __UpperCAmelCase : Any ):
def remove_articles(__UpperCAmelCase : List[Any] ):
return re.sub(r'\b(a|an|the)\b' , ' ' , lowerCamelCase_ )
def white_space_fix(__UpperCAmelCase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(__UpperCAmelCase : Tuple ):
__snake_case : int = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__UpperCAmelCase : List[str] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) )
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] ):
__snake_case : Tuple = normalize_answer(lowerCamelCase_ ).split()
__snake_case : Optional[int] = normalize_answer(lowerCamelCase_ ).split()
__snake_case : Tuple = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ )
__snake_case : Any = sum(common.values() )
if num_same == 0:
return 0
__snake_case : Optional[int] = 1.0 * num_same / len(lowerCamelCase_ )
__snake_case : str = 1.0 * num_same / len(lowerCamelCase_ )
__snake_case : List[str] = (2 * precision * recall) / (precision + recall)
return fa
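# Worked example of the token-level F1 computed above (self-contained, since the
# surrounding names are placeholders): after normalization strips the articles,
# "a cat sat" vs. "the cat sat down" share 2 of 2 and 2 of 3 tokens, so
# precision = 1.0, recall = 2/3 and F1 = 0.8.
def _toy_f1(pred_tokens, gold_tokens):
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)


assert abs(_toy_f1(["cat", "sat"], ["cat", "sat", "down"]) - 0.8) < 1e-9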
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] ):
return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ )
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ):
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
__snake_case : Union[str, Any] = 0
for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ):
em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
em /= len(lowerCamelCase_ )
return {"em": em}
def UpperCAmelCase__( __UpperCAmelCase : int ):
return model_prefix.startswith('rag' )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ):
__snake_case : Any = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__snake_case : Dict = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(lowerCamelCase_ ) )
delattr(lowerCamelCase_ , lowerCamelCase_ )
continue
__snake_case : Dict = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p]
setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) )
delattr(lowerCamelCase_ , lowerCamelCase_ )
return hparams, config
| 713 | # Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    # Print the upper half of the diamond: row i has i + 1 stars.
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n: int) -> None:
    # Print the lower half of the diamond: row i has i stars.
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n: int) -> None:
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
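# Buffered variant (sketch): building the diamond as one string makes the routine
# easy to test without capturing stdout. This common variant also avoids printing
# the widest row twice, so its output is close to, but not identical to, the above.
def diamond_string(n: int) -> str:
    rows = []
    for i in range(n):  # upper half, including the widest row
        rows.append(" " * (n - i - 1) + "* " * (i + 1))
    for i in range(n - 1, 0, -1):  # lower half
        rows.append(" " * (n - i) + "* " * i)
    return "\n".join(rows)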
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('''enter the number and , and see the magic : '''))
        print()
        pretty_print(user_number)
        K = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 679 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = 10
def lowercase_ ( self ):
__snake_case : List[str] = [1, 2, 3, 4]
__snake_case : Optional[int] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowercase_ ( self ):
__snake_case : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__snake_case : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowercase_ ( self ):
__snake_case : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__snake_case : Tuple = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def lowercase_ ( self ):
__snake_case : List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
__snake_case , __snake_case : Any = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
def lowercase_ ( self ):
__snake_case : int = ''
__snake_case , __snake_case : str = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
self.assertEqual(UpperCamelCase_ , [] )
def lowercase_ ( self ):
__snake_case : Optional[Any] = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
__snake_case , __snake_case : List[str] = process_story(UpperCamelCase_ )
__snake_case : Union[str, Any] = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__snake_case : List[Any] = ['It was the best of times.']
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowercase_ ( self ):
__snake_case : Optional[Any] = torch.tensor([1, 2, 3, 4] )
__snake_case : Dict = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )
def lowercase_ ( self ):
__snake_case : Tuple = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__snake_case : Tuple = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 23 ).numpy() , expected.numpy() )
def lowercase_ ( self ):
__snake_case : List[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__snake_case : Union[str, Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )
def lowercase_ ( self ):
__snake_case : Optional[int] = 101
__snake_case : str = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
__snake_case : Dict = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__snake_case : Any = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ )
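# Minimal sketch of the padding helper exercised above, reconstructed from the test
# expectations (the real implementation lives in utils_summarization):
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    sequence = sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad_sketch(list(range(1, 14)), 10, 0) == list(range(1, 11))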
| 714 | from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    # Each iteration clears the lowest set bit, so the loop runs once per set bit.
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    # Inspect the lowest bit with % 2, then shift right, once per bit of the input.
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
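# Cross-check sketch: both counters above must agree with the reference count of
# "1" digits in the binary representation (equivalently int.bit_count() on 3.10+).
for _n in (0, 1, 25, 37, 58, 2**20 - 1):
    assert get_set_bits_count_using_brian_kernighans_algorithm(_n) == bin(_n).count("1")
    assert get_set_bits_count_using_modulo_operator(_n) == bin(_n).count("1")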
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=setup
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 679 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "xmod"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=("en_XX",) , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : List[str] = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : Any = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = hidden_act
__snake_case : Dict = intermediate_size
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : int = attention_probs_dropout_prob
__snake_case : Tuple = max_position_embeddings
__snake_case : int = type_vocab_size
__snake_case : Optional[Any] = initializer_range
__snake_case : Optional[int] = layer_norm_eps
__snake_case : int = position_embedding_type
__snake_case : List[str] = use_cache
__snake_case : Union[str, Any] = classifier_dropout
__snake_case : Tuple = pre_norm
__snake_case : str = adapter_reduction_factor
__snake_case : int = adapter_layer_norm
__snake_case : Any = adapter_reuse_layer_norm
__snake_case : List[str] = ln_before_adapter
__snake_case : Union[str, Any] = list(_UpperCAmelCase )
__snake_case : Optional[int] = default_language
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase):
"""simple docstring"""
@property
def lowercase_ ( self ):
if self.task == "multiple-choice":
__snake_case : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
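# The ONNX input spec above is just an OrderedDict of dynamic-axis maps; a
# multiple-choice task gains an extra "choice" axis. Standalone illustration
# (not the transformers implementation):
def _onnx_inputs_sketch(task: str):
    if task == "multiple-choice":
        axes = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axes = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axes), ("attention_mask", axes)])


assert list(_onnx_inputs_sketch("default")) == ["input_ids", "attention_mask"]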
| 715 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
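# strtobool (imported above) maps "y/yes/t/true/on/1" to 1 and "n/no/f/false/off/0"
# to 0, case-insensitively, and raises ValueError for anything else, which is what
# the except branch above guards against:
assert strtobool("yes") == 1 and strtobool("NO") == 0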
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase__( __UpperCAmelCase : int ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
if not _run_local_tests or _run_local_tests == 0:
__snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : int ):
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( *__UpperCAmelCase : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCAmelCase ) and name.startswith('test' ):
for decorator in decorators:
__snake_case : Optional[Any] = decorator(__UpperCAmelCase )
setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
return cls
return decorate
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
pass
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
__snake_case : Optional[Any] = requests.Session().request
def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case : int = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
__snake_case : str = timeout
try:
return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case : Any = url
__snake_case : Union[str, Any] = e.args[0]
__snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
__snake_case : str = (max_retry_error,)
raise
def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
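# Usage sketch for the simulation above: the context manager is named `offline` in
# datasets' own testing utilities (the def name here is a mangled placeholder).
#
#   with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=1e-16):
#       ...  # any requests.Session.request call now times out almost immediately
#
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       ...  # any send raises requests.ConnectionError("Offline mode is enabled.")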
@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
__snake_case : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
try:
os.chdir(__UpperCAmelCase )
yield
finally:
os.chdir(__UpperCAmelCase )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
try:
return func(*__UpperCAmelCase , **__UpperCAmelCase )
except HTTPError as err:
if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
pytest.xfail(str(__UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = returncode
__snake_case : Tuple = stdout
__snake_case : List[Any] = stderr
async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
while True:
__snake_case : Optional[int] = await stream.readline()
if line:
callback(__UpperCAmelCase )
else:
break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Dict = 2_95_00
__snake_case : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
| 679 | 0 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__magic_name__ = open # noqa: we just need to have a builtin inside this module to test it properly
| 716 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # value held by this node
        self.next: Node[T] | None = None  # link to the node below

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list; the head of the list is the top."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
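# Quick usage demo of the stack above (LIFO order, "->" string form, O(1) push/pop):
_demo = LinkedStack()
_demo.push(1)
_demo.push(2)
assert str(_demo) == "2->1" and len(_demo) == 2
assert _demo.pop() == 2 and _demo.peek() == 1
_demo.clear()
assert _demo.is_empty()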
if __name__ == "__main__":
from doctest import testmod
testmod()
| 679 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( __lowerCAmelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__()
# make sure scheduler can always be converted to DDIM
__snake_case : Dict = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self , _UpperCAmelCase = 1 , _UpperCAmelCase = None , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 50 , _UpperCAmelCase = None , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , lowerCAmelCase_ ):
__snake_case : Dict = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
__snake_case : int = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__snake_case : Union[str, Any] = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__snake_case : str = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__snake_case : Any = self.scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , eta=lowerCAmelCase_ , use_clipped_model_output=lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
__snake_case : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case : List[str] = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
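# Usage sketch, assuming this pipeline is exported as diffusers.DDIMPipeline (any
# DDPM checkpoint works, since the scheduler is converted to DDIM in __init__):
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#
# eta=0.0 gives deterministic DDIM sampling; eta=1.0 recovers DDPM-like variance.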
| 717 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
__snake_case : Tuple = self.dummy_prior
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : str = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
__snake_case : Optional[int] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
__snake_case : Any = DisjunctiveConstraint(UpperCAmelCase__ )
self.assertTrue(isinstance(dc.token_ids , UpperCAmelCase__ ) )
with self.assertRaises(UpperCAmelCase__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(UpperCAmelCase__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def lowercase_ ( self ):
__snake_case : List[str] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(UpperCAmelCase__ ):
DisjunctiveConstraint(UpperCAmelCase__ ) # fails here
def lowercase_ ( self ):
__snake_case : Union[str, Any] = [[1, 2, 3], [1, 2, 4]]
__snake_case : Tuple = DisjunctiveConstraint(UpperCAmelCase__ )
__snake_case : Union[str, Any] = dc.update(1 )
__snake_case : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(UpperCAmelCase__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__snake_case : Dict = dc.update(2 )
__snake_case : Tuple = stepped is True and completed is False and reset is False
self.assertTrue(UpperCAmelCase__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__snake_case : List[Any] = dc.update(3 )
__snake_case : List[str] = stepped is True and completed is True and reset is False
self.assertTrue(UpperCAmelCase__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def lowercase_ ( self ):
__snake_case : List[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__snake_case : Dict = DisjunctiveConstraint(UpperCAmelCase__ )
__snake_case : Union[str, Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__snake_case : Any = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__snake_case : Optional[Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__snake_case : Any = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__snake_case : Any = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__snake_case : List[str] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__snake_case : Tuple = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
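# Illustrative sketch (not part of the original test file) of how the
# DisjunctiveConstraint API exercised above is typically driven token by
# token. Only attributes used in these tests (update, remaining, current_seq)
# are assumed here.
#
#     dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4, 5]])
#     for token in [1, 2, 4, 5]:
#         stepped, completed, reset = dc.update(token)
#         if completed:
#             break  # one of the disjunctive branches fully matched
#     assert dc.current_seq == [1, 2, 4, 5] and dc.remaining() == 0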
| 718 | import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Any ):
# Initialise PyTorch model
__snake_case : List[str] = TaConfig.from_json_file(__UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__snake_case : int = TaForConditionalGeneration(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
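# Example invocation (assuming this script is saved as
# convert_t5_original_tf_checkpoint_to_pytorch.py; all paths below are
# placeholders, not real files):
#
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/t5/model.ckpt \
#         --config_file /path/to/t5/config.json \
#         --pytorch_dump_path /path/to/output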
| 679 | 0 |
'''simple docstring'''
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__magic_name__ = open # noqa: we just need to have a builtin inside this module to test it properly
| 719 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
__snake_case : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
__snake_case : str = 0
__snake_case : List[str] = 0
__snake_case : int = 0
__snake_case : Tuple = 0
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = threshold
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = patience
def lowercase_ ( self ):
__snake_case : Dict = 0
__snake_case : Dict = 0
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
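# Worked example of the statistic printed above (illustration only, assuming
# a 12-layer encoder): if patience-based early exit uses 6.0 layers on
# average, then avg_inf_layers = 6.0 and the reported speed up is
# 1 - 6.0 / 12 = 0.50.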
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
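# Trace of the patience rule implemented above (illustration only,
# classification case with patience = 2): per-layer argmax predictions
# A, B, B, B give patient_counter 0 -> 0 -> 1 -> 2; the loop breaks after the
# fourth layer, once two consecutive layers agree with the running prediction.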
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
__snake_case : int = nn.Dropout(config.hidden_dropout_prob )
__snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
__snake_case : List[str] = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : int = (logits[-1],)
if labels is not None:
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
__snake_case : List[str] = MSELoss()
__snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case : int = (total_loss / total_weights,) + outputs
return outputs
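# Worked example of the layer-weighted training loss above (illustration
# only): with three classifier heads producing losses l1, l2, l3, the
# aggregate is (1*l1 + 2*l2 + 3*l3) / (1 + 2 + 3), so deeper heads carry
# more weight in the objective.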
| 679 | 0 |
def UpperCAmelCase__( __a : int | float | str ):
try:
__snake_case : Any = float(__a )
except ValueError:
raise ValueError('Please enter a valid number' )
__snake_case : int = decimal - int(__a )
if fractional_part == 0:
return int(__a ), 1
else:
__snake_case : Tuple = len(str(__a ).split('.' )[1] )
__snake_case : int = int(decimal * (10**number_of_frac_digits) )
__snake_case : int = 10**number_of_frac_digits
__snake_case , __snake_case : List[str] = denominator, numerator
while True:
__snake_case : Optional[int] = dividend % divisor
if remainder == 0:
break
__snake_case , __snake_case : Tuple = divisor, remainder
__snake_case , __snake_case : Optional[int] = numerator / divisor, denominator / divisor
return int(__a ), int(__a )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
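# Worked example of the Euclidean reduction above (illustration only):
# decimal_to_fraction("6.25") sets numerator = 625 and denominator = 100;
# the remainder loop reduces the pair via 625 % 100 = 25 and 100 % 25 = 0,
# so the greatest common divisor is 25 and the result is
# (625 // 25, 100 // 25) = (25, 4).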
| 720 | def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
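# Illustrative calls (not part of the original script): is_isogram("Uncopyrightable")
# returns True because every letter occurs exactly once after lowercasing,
# is_isogram("alphabet") returns False (repeated 'a'), and is_isogram("a b")
# raises ValueError because the space is not alphabetic.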
| 679 | 0 |
from itertools import count
def UpperCAmelCase__( __UpperCAmelCase : int = 50 ):
__snake_case : Dict = [1] * __UpperCAmelCase
for n in count(__UpperCAmelCase ):
fill_count_functions.append(1 )
for block_length in range(__UpperCAmelCase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_00_00_00:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
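# Sanity check suggested by the Project Euler 115 statement this solves
# (hedged: the value comes from the published problem text, not re-derived
# here): with a minimum block length of 3, the fill-count function first
# exceeds one million at n = 30, so solution(3) should return 30.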
| 721 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "retribert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = share_encoders
__snake_case : Optional[Any] = projection_dim
| 679 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(lowercase__)
class __SCREAMING_SNAKE_CASE ( lowercase__):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def lowercase_ ( self , **_UpperCAmelCase ):
__snake_case : Dict = {}
if "threshold" in kwargs:
__snake_case : str = kwargs['''threshold''']
return {}, {}, postprocess_kwargs
def __call__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = load_image(UpperCAmelCase__ )
__snake_case : Optional[Any] = torch.IntTensor([[image.height, image.width]] )
__snake_case : int = self.image_processor(images=[image] , return_tensors='pt' )
if self.tokenizer is not None:
__snake_case : int = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
__snake_case : str = target_size
return inputs
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Any = model_inputs.pop('target_size' )
__snake_case : Union[str, Any] = self.model(**UpperCAmelCase__ )
__snake_case : Any = outputs.__class__({'target_size': target_size, **outputs} )
if self.tokenizer is not None:
__snake_case : List[str] = model_inputs['''bbox''']
return model_outputs
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0.9 ):
__snake_case : Dict = model_outputs['''target_size''']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
__snake_case : Union[str, Any] = target_size[0].tolist()
def unnormalize(_UpperCAmelCase ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_000),
(height * bbox[1] / 1_000),
(width * bbox[2] / 1_000),
(height * bbox[3] / 1_000),
] ) )
__snake_case : List[Any] = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
__snake_case : Optional[int] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
__snake_case : Dict = [unnormalize(UpperCAmelCase__ ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
__snake_case : List[str] = ['''score''', '''label''', '''box''']
__snake_case : Any = [dict(zip(UpperCAmelCase__ , UpperCAmelCase__ ) ) for vals in zip(scores.tolist() , UpperCAmelCase__ , UpperCAmelCase__ ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
__snake_case : Tuple = self.image_processor.post_process_object_detection(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
__snake_case : int = raw_annotations[0]
__snake_case : Dict = raw_annotation['''scores''']
__snake_case : Optional[Any] = raw_annotation['''labels''']
__snake_case : List[str] = raw_annotation['''boxes''']
__snake_case : List[Any] = scores.tolist()
__snake_case : str = [self.model.config.idalabel[label.item()] for label in labels]
__snake_case : List[str] = [self._get_bounding_box(UpperCAmelCase__ ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
__snake_case : str = ['''score''', '''label''', '''box''']
__snake_case : Optional[int] = [
dict(zip(UpperCAmelCase__ , UpperCAmelCase__ ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def lowercase_ ( self , _UpperCAmelCase ):
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
__snake_case : Union[str, Any] = box.int().tolist()
__snake_case : Optional[Any] = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
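# Shape of a single prediction produced by the postprocessing above
# (illustration only; the score and label values are hypothetical):
#
#     {"score": 0.98, "label": "remote",
#      "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}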
| 700 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | 0 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 701 | import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ):
__snake_case : List[str] = parent
__snake_case : Tuple = batch_size
__snake_case : str = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : List[str] = last_hidden_size
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = conv_kernel_size
__snake_case : int = output_stride
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : str = use_labels
__snake_case : Optional[Any] = is_training
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
__snake_case : Union[str, Any] = scope
def lowercase_ ( self ):
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.num_labels
__snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = MobileViTModelTester(self )
__snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[Any] = outputs.hidden_states
__snake_case : str = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : Optional[Any] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__magic_name__ = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 702 | def UpperCAmelCase__( __UpperCAmelCase : int | float | str ):
try:
__snake_case : int = float(__UpperCAmelCase )
except ValueError:
raise ValueError('Please enter a valid number' )
__snake_case : Any = decimal - int(__UpperCAmelCase )
if fractional_part == 0:
return int(__UpperCAmelCase ), 1
else:
__snake_case : Tuple = len(str(__UpperCAmelCase ).split('.' )[1] )
__snake_case : Tuple = int(decimal * (10**number_of_frac_digits) )
__snake_case : List[Any] = 10**number_of_frac_digits
__snake_case , __snake_case : List[Any] = denominator, numerator
while True:
__snake_case : Any = dividend % divisor
if remainder == 0:
break
__snake_case , __snake_case : Optional[int] = divisor, remainder
__snake_case , __snake_case : Union[str, Any] = numerator / divisor, denominator / divisor
return int(__UpperCAmelCase ), int(__UpperCAmelCase )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 679 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 4_2
__UpperCAmelCase = 4_2
__UpperCAmelCase = 4_2
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 4_2
__UpperCAmelCase = 4_2
__UpperCAmelCase = None
__UpperCAmelCase = None
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_):
"""simple docstring"""
__UpperCAmelCase = "train"
__UpperCAmelCase = "dev"
__UpperCAmelCase = "test"
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
@staticmethod
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase ):
raise NotImplementedError
@staticmethod
def lowercase_ ( _UpperCAmelCase ):
raise NotImplementedError
@staticmethod
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase="[CLS]" , _UpperCAmelCase=1 , _UpperCAmelCase="[SEP]" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0 , _UpperCAmelCase=0 , _UpperCAmelCase=-100 , _UpperCAmelCase=0 , _UpperCAmelCase=True , ):
__snake_case : Any = {label: i for i, label in enumerate(_lowercase )}
__snake_case : List[Any] = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' , _lowercase , len(_lowercase ) )
__snake_case : List[str] = []
__snake_case : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
__snake_case : str = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__snake_case : str = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
__snake_case : Tuple = tokens[: (max_seq_length - special_tokens_count)]
__snake_case : Union[str, Any] = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__snake_case : Tuple = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__snake_case : Optional[Any] = [cls_token] + tokens
__snake_case : Dict = [pad_token_label_id] + label_ids
__snake_case : Optional[int] = [cls_token_segment_id] + segment_ids
__snake_case : Any = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__snake_case : Optional[Any] = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
__snake_case : str = max_seq_length - len(_lowercase )
if pad_on_left:
__snake_case : Any = ([pad_token] * padding_length) + input_ids
__snake_case : Any = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__snake_case : Optional[int] = ([pad_token_segment_id] * padding_length) + segment_ids
__snake_case : Tuple = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' , example.guid )
logger.info('tokens: %s' , ' '.join([str(x ) for x in tokens] ) )
logger.info('input_ids: %s' , ' '.join([str(x ) for x in input_ids] ) )
logger.info('input_mask: %s' , ' '.join([str(x ) for x in input_mask] ) )
logger.info('segment_ids: %s' , ' '.join([str(x ) for x in segment_ids] ) )
logger.info('label_ids: %s' , ' '.join([str(x ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
__snake_case : Tuple = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
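# Padding/masking illustration for the feature builder above (not from the
# original file; the token ids are hypothetical): with max_seq_length = 8,
# pad_on_left = False and four real tokens, the outputs line up as
#   input_ids  = [101, 1996, 3899, 102, 0, 0, 0, 0]
#   input_mask = [1,   1,    1,    1,   0, 0, 0, 0]
# so attention only covers the real tokens.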
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase_):
"""simple docstring"""
__UpperCAmelCase = 4_2
__UpperCAmelCase = nn.CrossEntropyLoss().ignore_index
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase=False , _UpperCAmelCase = Split.train , ):
# Load data features from cache or dataset file
__snake_case : Dict = os.path.join(
_lowercase , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case : List[Any] = cached_features_file + '.lock'
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
__snake_case : List[str] = torch.load(_lowercase )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
__snake_case : int = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
__snake_case : Dict = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self ):
return len(self.features )
def __getitem__( self , _UpperCAmelCase ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 4_2
__UpperCAmelCase = -1_0_0
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase=False , _UpperCAmelCase = Split.train , ):
__snake_case : Tuple = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
__snake_case : Union[str, Any] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__snake_case : Optional[int] = tf.data.Dataset.from_generator(
_lowercase , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) , (
{'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__snake_case : Tuple = tf.data.Dataset.from_generator(
_lowercase , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) , (
{
'input_ids': tf.TensorShape([None] ),
'attention_mask': tf.TensorShape([None] ),
'token_type_ids': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase_ ( self ):
__snake_case : Dict = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self , _UpperCAmelCase ):
return self.features[i]
| 703 | import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase_ ( self ):
if self.train_file is not None:
__snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case : List[str] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , _UpperCAmelCase ):
__snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
__snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
__snake_case : List[Any] = len(_UpperCAmelCase )
__snake_case : Union[str, Any] = len(features[0]['input_ids'] )
__snake_case : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
__snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )
__snake_case : Optional[Any] = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
__snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
__snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.intaa )
return batch
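# Shape trace for the collator above (illustration only): with batch_size = 2,
# num_choices = 4 and a padded length of 16, the flattened pad call produces
# (8, 16) tensors, and the view(2, 4, -1) step restores (2, 4, 16) per key,
# with labels of shape (2,).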
def UpperCAmelCase__( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__snake_case : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script expects SWAG-style columns ('sent1', 'sent2', 'ending0'..'ending3',
# 'label'). You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : Optional[int] = {}
if data_args.train_file is not None:
__snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : int = data_args.validation_file
__snake_case : int = (data_args.train_file if data_args.train_file is not None else data_args.validation_file).split('.' )[-1]
__snake_case : Tuple = load_dataset(
__UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : str = [F"""ending{i}""" for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : Tuple = 'sent2'
if data_args.max_seq_length is None:
__snake_case : List[Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `max_seq_length` value'
' of 1024. If you would like to use a longer `max_seq_length` up to `tokenizer.model_max_length` you can'
' override this default with `--max_seq_length xxx`.' )
__snake_case : List[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
__snake_case : Union[str, Any] = examples[question_header_name]
__snake_case : Optional[int] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
]
# Flatten out
__snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
__snake_case : int = list(chain(*__UpperCAmelCase ) )
# Tokenize
__snake_case : Tuple = tokenizer(
__UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
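# Expansion sketch for one SWAG record (values hypothetical): `sent1` is repeated four
# times and paired with `sent2 + ending0..ending3`; the batch is tokenized as one flat
# list of 4 * batch_size pairs, and the dict above regroups every 4 consecutive rows
# back into a single example.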
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(__UpperCAmelCase : int ):
__snake_case , __snake_case : Union[str, Any] = eval_predictions
__snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 679 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __SCREAMING_SNAKE_CASE ( snake_case__):
"""simple docstring"""
__UpperCAmelCase = "trajectory_transformer"
__UpperCAmelCase = ["past_key_values"]
__UpperCAmelCase = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=100 , _UpperCAmelCase=5 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase=249 , _UpperCAmelCase=6 , _UpperCAmelCase=17 , _UpperCAmelCase=25 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=128 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0006 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=1 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=50_256 , _UpperCAmelCase=50_256 , **_UpperCAmelCase , ):
__snake_case : Optional[Any] = vocab_size
__snake_case : Optional[Any] = action_weight
__snake_case : Any = reward_weight
__snake_case : List[Any] = value_weight
__snake_case : str = max_position_embeddings
__snake_case : Optional[int] = block_size
__snake_case : Tuple = action_dim
__snake_case : Dict = observation_dim
__snake_case : Union[str, Any] = transition_dim
__snake_case : List[str] = learning_rate
__snake_case : List[Any] = n_layer
__snake_case : Dict = n_head
__snake_case : List[Any] = n_embd
__snake_case : Dict = embd_pdrop
__snake_case : int = attn_pdrop
__snake_case : Dict = resid_pdrop
__snake_case : int = initializer_range
__snake_case : Dict = layer_norm_eps
__snake_case : List[Any] = kaiming_initializer_range
__snake_case : List[Any] = use_cache
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
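# Usage sketch (values hypothetical; upstream the class is exported as
# TrajectoryTransformerConfig): the attribute_map above aliases GPT-style names, so
# `config = TrajectoryTransformerConfig(n_layer=4, n_head=4, n_embd=128)` yields
# `config.hidden_size == 128` and `config.num_hidden_layers == 4`.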
| 704 | import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
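# e.g. with tgt_lang="fr" the prefix becomes [self.lang_code_to_id["fr"]], so every
# sequence built by build_inputs_with_special_tokens below starts with the
# target-language token.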
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
__snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def UpperCAmelCase__( __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
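# Round-trip sketch for the two helpers above (path hypothetical):
# save_json({'<s>': 0, '<pad>': 1}, 'vocab.json'); load_json('vocab.json')['<pad>'] == 1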
| 679 | 0 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase):
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
@property
def lowercase_ ( self ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def lowercase_ ( self ):
__snake_case : int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A_ , 'feature_size' ) )
self.assertTrue(hasattr(A_ , 'sampling_rate' ) )
self.assertTrue(hasattr(A_ , 'padding_value' ) )
def lowercase_ ( self ):
__snake_case : Tuple = self.feat_extract_tester.prepare_inputs_for_common()
__snake_case : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case : Optional[int] = feat_extract.model_input_names[0]
__snake_case : Optional[int] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(A_ ) == len(A_ ) for x, y in zip(A_ , processed_features[input_name] ) ) )
__snake_case : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
__snake_case : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
__snake_case : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__snake_case : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
__snake_case : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case : int = feat_extract.model_input_names[0]
__snake_case : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
__snake_case : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__snake_case : Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def lowercase_ ( self ):
__snake_case : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
__snake_case : int = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case : int = feat_extract.model_input_names[0]
__snake_case : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
__snake_case : Optional[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__snake_case : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def lowercase_ ( self , _UpperCAmelCase=False ):
def _inputs_have_equal_length(_UpperCAmelCase ):
__snake_case : Optional[Any] = len(input[0] )
for input_slice in input[1:]:
if len(A_ ) != length:
return False
return True
def _inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ):
if len(A_ ) != len(A_ ):
return False
for input_slice_a, input_slice_a in zip(A_ , A_ ):
if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1E-3 ):
return False
return True
__snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case : str = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
__snake_case : Tuple = feat_extract.model_input_names[0]
__snake_case : int = BatchFeature({input_name: speech_inputs} )
__snake_case : Union[str, Any] = self.feat_extract_tester.seq_length_diff
__snake_case : Union[str, Any] = self.feat_extract_tester.max_seq_length + pad_diff
__snake_case : List[Any] = self.feat_extract_tester.min_seq_length
__snake_case : Any = self.feat_extract_tester.batch_size
__snake_case : int = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__snake_case : str = feat_extract.pad(A_ , padding=A_ )
__snake_case : Optional[Any] = input_a[input_name]
__snake_case : Tuple = feat_extract.pad(A_ , padding='longest' )
__snake_case : int = input_a[input_name]
__snake_case : str = feat_extract.pad(A_ , padding='max_length' , max_length=len(speech_inputs[-1] ) )
__snake_case : Optional[int] = input_a[input_name]
__snake_case : List[Any] = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )
__snake_case : Tuple = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='max_length' )[input_name]
__snake_case : Tuple = feat_extract.pad(
A_ , padding='max_length' , max_length=A_ , return_tensors='np' )
__snake_case : Optional[int] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__snake_case : List[str] = feat_extract.pad(A_ , pad_to_multiple_of=10 )
__snake_case : Optional[int] = input_a[input_name]
__snake_case : int = feat_extract.pad(A_ , padding='longest' , pad_to_multiple_of=10 )
__snake_case : str = input_a[input_name]
__snake_case : Tuple = feat_extract.pad(
A_ , padding='max_length' , pad_to_multiple_of=10 , max_length=A_ )
__snake_case : List[str] = input_a[input_name]
__snake_case : Any = feat_extract.pad(
A_ , padding='max_length' , pad_to_multiple_of=10 , max_length=A_ , return_tensors='np' , )
__snake_case : int = input_a[input_name]
self.assertTrue(all(len(A_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
__snake_case : Dict = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(A_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__snake_case : Optional[int] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def lowercase_ ( self , _UpperCAmelCase=False ):
def _inputs_have_equal_length(_UpperCAmelCase ):
__snake_case : Union[str, Any] = len(input[0] )
for input_slice in input[1:]:
if len(A_ ) != length:
return False
return True
def _inputs_are_equal(_UpperCAmelCase , _UpperCAmelCase ):
if len(A_ ) != len(A_ ):
return False
for input_slice_a, input_slice_a in zip(A_ , A_ ):
if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1E-3 ):
return False
return True
__snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case : Dict = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
__snake_case : Union[str, Any] = feat_extract.model_input_names[0]
__snake_case : List[Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__snake_case : str = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=A_ )
__snake_case : Dict = input_a[input_name]
__snake_case : Tuple = feat_extract.pad(A_ , padding='max_length' , max_length=len(speech_inputs[0] ) )
__snake_case : Optional[int] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertFalse(_inputs_have_equal_length(A_ ) )
# truncate to smallest with np
__snake_case : Any = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=A_ , )
__snake_case : List[Any] = input_a[input_name]
__snake_case : List[Any] = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
__snake_case : str = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(A_ ) )
# truncate to middle
__snake_case : List[Any] = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=A_ , return_tensors='np' , )
__snake_case : Tuple = input_a[input_name]
__snake_case : Optional[int] = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=A_ )
__snake_case : Optional[int] = input_a[input_name]
__snake_case : Optional[int] = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
__snake_case : Optional[Any] = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertTrue(_inputs_are_equal(A_ , A_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(A_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , truncation=A_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='longest' , truncation=A_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='longest' , truncation=A_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(A_ ):
feat_extract.pad(A_ , padding='max_length' , truncation=A_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__snake_case : List[str] = 12
__snake_case : List[str] = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , truncation=A_ , )
__snake_case : Optional[Any] = input_a[input_name]
__snake_case : List[Any] = feat_extract.pad(
A_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , )
__snake_case : Optional[int] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__snake_case : Optional[int] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__snake_case : Dict = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(A_ ) )
self.assertFalse(_inputs_have_equal_length(A_ ) )
def lowercase_ ( self ):
self._check_padding(numpify=A_ )
def lowercase_ ( self ):
self._check_padding(numpify=A_ )
def lowercase_ ( self ):
self._check_truncation(numpify=A_ )
def lowercase_ ( self ):
self._check_truncation(numpify=A_ )
@require_torch
def lowercase_ ( self ):
__snake_case : Dict = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case : int = self.feat_extract_tester.prepare_inputs_for_common()
__snake_case : Any = feat_extract.model_input_names[0]
__snake_case : Optional[int] = BatchFeature({input_name: speech_inputs} )
__snake_case : List[str] = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )[input_name]
__snake_case : Any = feat_extract.pad(A_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
@require_tf
def lowercase_ ( self ):
__snake_case : Dict = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case : Any = self.feat_extract_tester.prepare_inputs_for_common()
__snake_case : List[str] = feat_extract.model_input_names[0]
__snake_case : List[str] = BatchFeature({input_name: speech_inputs} )
__snake_case : List[str] = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )[input_name]
__snake_case : int = feat_extract.pad(A_ , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.feat_extract_dict
__snake_case : Tuple = True
__snake_case : Any = self.feature_extraction_class(**A_ )
__snake_case : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
__snake_case : Dict = [len(A_ ) for x in speech_inputs]
__snake_case : List[Any] = feat_extract.model_input_names[0]
__snake_case : List[Any] = BatchFeature({input_name: speech_inputs} )
__snake_case : List[str] = feat_extract.pad(A_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , A_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , A_ )
def lowercase_ ( self ):
__snake_case : List[str] = self.feat_extract_dict
__snake_case : int = True
__snake_case : Union[str, Any] = self.feature_extraction_class(**A_ )
__snake_case : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common()
__snake_case : int = [len(A_ ) for x in speech_inputs]
__snake_case : List[Any] = feat_extract.model_input_names[0]
__snake_case : List[Any] = BatchFeature({input_name: speech_inputs} )
__snake_case : Optional[int] = min(A_ )
__snake_case : str = feat_extract.pad(
A_ , padding='max_length' , max_length=A_ , truncation=A_ , return_tensors='np' )
self.assertIn('attention_mask' , A_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 705 | def UpperCAmelCase__( __UpperCAmelCase : list ):
__snake_case : List[Any] = len(__UpperCAmelCase )
for _ in range(__UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__snake_case , __snake_case : int = arr[i + 1], arr[i]
return arr
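# n alternating passes over even- then odd-indexed pairs guarantee a sorted result in
# O(n^2) comparisons, e.g. odd_even_transposition([3, 1, 2]) == [1, 2, 3].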
if __name__ == "__main__":
__magic_name__ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 679 | 0 |
from __future__ import annotations
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] ):
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(_lowerCamelCase ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(_lowerCamelCase ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
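# Worked example: for [[1, 3], [2, 4]] the prefix passes give first row [1, 4] and first
# column value 3, so the bottom-right cell becomes 4 + min(4, 3) == 7, the cheapest
# down/right path (1 -> 2 -> 4).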
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
__snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
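# Perceiver ids are raw UTF-8 bytes shifted by the special-token count (6): 'U' is byte
# 85 -> id 91, the three bytes of '€' (226, 130, 172) become 232, 136, 178, and ids 4
# and 5 are [CLS] and [SEP].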
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase=0.01 , _UpperCAmelCase=1_000 ):
__snake_case : Optional[Any] = p_stop
__snake_case : Any = max_length
def __iter__( self ):
__snake_case : List[Any] = 0
__snake_case : List[Any] = False
while not stop and count < self.max_length:
yield count
count += 1
__snake_case : Dict = random.random() < self.p_stop
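# Each yielded element has probability p_stop of being the last, so stream lengths
# follow a geometric distribution capped at max_length -- useful for testing uneven
# iterable datasets across processes.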
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=True ):
__snake_case : Optional[Any] = [
BatchSamplerShard(__UpperCamelCase , 2 , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
for i in range(2 )
]
__snake_case : Optional[int] = [list(__UpperCamelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__UpperCamelCase ) for shard in batch_sampler_shards] , [len(__UpperCamelCase ) for e in expected] )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
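# e.g. sharding BatchSampler(range(24), batch_size=3) across 2 processes gives each
# shard 4 batches of 3 indices, which the helper above compares against `expected`.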
def lowercase_ ( self ):
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
__snake_case : Any = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
__snake_case : Optional[int] = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but the number of batches is a
# multiple of num_processes.
__snake_case : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
__snake_case : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size and the number of batches is not a
# multiple of num_processes.
__snake_case : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
__snake_case : Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is very small.
__snake_case : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : List[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
__snake_case : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : List[Any] = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
def lowercase_ ( self ):
# Check the shards when the dataset is a round multiple of batch size.
__snake_case : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
__snake_case : List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
__snake_case : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
__snake_case : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
# Check the shards when the dataset is very small.
__snake_case : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : List[Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
__snake_case : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : List[Any] = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
def lowercase_ ( self ):
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
__snake_case : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case : Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
__snake_case : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but the number of batches is a
# multiple of num_processes.
__snake_case : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
__snake_case : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size and the number of batches is not a
# multiple of num_processes.
__snake_case : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
__snake_case : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is very small.
__snake_case : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : Optional[int] = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
__snake_case : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
__snake_case : str = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
def lowercase_ ( self ):
# Check the shards when the dataset is a round multiple of batch size.
__snake_case : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
__snake_case : List[str] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
__snake_case : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case : Optional[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
__snake_case : Optional[int] = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is very small.
__snake_case : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : int = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
__snake_case : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : List[str] = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
def lowercase_ ( self ):
__snake_case : List[Any] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__snake_case : Optional[Any] = [BatchSamplerShard(__UpperCamelCase , 2 , __UpperCamelCase , even_batches=__UpperCamelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=False ):
random.seed(__UpperCamelCase )
__snake_case : List[Any] = list(__UpperCamelCase )
__snake_case : Union[str, Any] = [
IterableDatasetShard(
__UpperCamelCase , batch_size=__UpperCamelCase , drop_last=__UpperCamelCase , num_processes=__UpperCamelCase , process_index=__UpperCamelCase , split_batches=__UpperCamelCase , )
for i in range(__UpperCamelCase )
]
__snake_case : Tuple = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__UpperCamelCase )
iterable_dataset_lists.append(list(__UpperCamelCase ) )
__snake_case : Optional[Any] = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
__snake_case : Optional[Any] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
self.assertTrue(len(__UpperCamelCase ) % shard_batch_size == 0 )
__snake_case : Tuple = []
for idx in range(0 , len(__UpperCamelCase ) , __UpperCamelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__UpperCamelCase ) < len(__UpperCamelCase ):
reference += reference
self.assertListEqual(__UpperCamelCase , reference[: len(__UpperCamelCase )] )
def lowercase_ ( self ):
__snake_case : Dict = 42
__snake_case : Any = RandomIterableDataset()
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
# Edge case with a very small dataset
__snake_case : Union[str, Any] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
def lowercase_ ( self ):
__snake_case : Tuple = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCamelCase )
__snake_case : Optional[Any] = SkipBatchSampler(__UpperCamelCase , 2 )
self.assertListEqual(list(__UpperCamelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase_ ( self ):
__snake_case : Tuple = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase_ ( self ):
__snake_case : Optional[Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
__snake_case : Any = skip_first_batches(__UpperCamelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase_ ( self ):
__snake_case : Tuple = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(__UpperCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__UpperCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def lowercase_ ( self ):
Accelerator()
__snake_case : Dict = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(__UpperCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__UpperCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
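# Illustrative sketch (not part of the test suite above): the batch-skipping
# behavior exercised by SkipBatchSampler / skip_first_batches can be
# reproduced with the standard library alone; the helper names below are
# hypothetical.
from itertools import islice

def _batches(data, batch_size):
    it = iter(data)
    while batch := list(islice(it, batch_size)):
        yield batch

def _skip_first_batches(batch_iter, num_batches):
    # Drop the first `num_batches` batches, mirroring the helper under test.
    return islice(batch_iter, num_batches, None)

assert list(_skip_first_batches(_batches(range(16), 4), 2)) == [[8, 9, 10, 11], [12, 13, 14, 15]]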
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
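# Illustrative usage of the two feature types above, assuming the upstream
# `datasets` API (where the flattening method is exposed as `encode_example`):
from datasets import Dataset, Features
from datasets.features import Translation, TranslationVariableLanguages

features = Features({'translation': Translation(languages=['en', 'fr'])})
ds = Dataset.from_dict({'translation': [{'en': 'the cat', 'fr': 'le chat'}]}, features=features)
print(ds[0]['translation'])  # {'en': 'the cat', 'fr': 'le chat'}

# TranslationVariableLanguages flattens (possibly multi-valued) dicts into
# parallel, language-sorted tuples:
tvl = TranslationVariableLanguages(languages=['en', 'fr'])
print(tvl.encode_example({'en': 'the cat', 'fr': ['le chat', 'la chatte']}))
# {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}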
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE ( A_):
"""simple docstring"""
__UpperCAmelCase = '''ClapFeatureExtractor'''
__UpperCAmelCase = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
__snake_case : List[Any] = kwargs.pop('sampling_rate' , _UpperCAmelCase )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
__snake_case : str = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if audios is not None:
__snake_case : str = self.feature_extractor(
_UpperCAmelCase , sampling_rate=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and audios is not None:
__snake_case : int = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def lowercase_ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def lowercase_ ( self ):
__snake_case : int = self.tokenizer.model_input_names
__snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
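# Hedged usage sketch of the processor pattern above: a single __call__
# routes text to the tokenizer and audio to the feature extractor; the
# checkpoint name is illustrative.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
audio = np.random.randn(48_000).astype(np.float32)  # ~1 s of mono audio
inputs = processor(text=['a dog barking'], audios=audio, sampling_rate=48_000, return_tensors='pt')
# Expect tokenizer outputs (input_ids, attention_mask) merged with the
# feature extractor's input_features.
print(sorted(inputs.keys()))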
from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
    __snake_case : Union[str, Any] = g + heuristic[x][y] # f = g + h: estimated total cost through this cell
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
    __snake_case : str = False # flag set when no cell can be expanded
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
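# Note on the heuristic above: it is the Manhattan distance to the goal. On a
# 4-connected grid each move changes one coordinate by 1, so at least
# |dx| + |dy| moves are needed and the heuristic never overestimates the true
# cost, which keeps the search admissible. Standalone illustration:
def _manhattan(cell, goal):
    return abs(cell[0] - goal[0]) + abs(cell[1] - goal[1])

assert _manhattan((0, 0), (4, 5)) == 9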
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class __SCREAMING_SNAKE_CASE ( __A):
"""simple docstring"""
__UpperCAmelCase = """unispeech-sat"""
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1_500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ):
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
__snake_case : int = hidden_size
__snake_case : List[Any] = feat_extract_norm
__snake_case : str = feat_extract_activation
__snake_case : str = list(UpperCamelCase__ )
__snake_case : List[str] = list(UpperCamelCase__ )
__snake_case : Union[str, Any] = list(UpperCamelCase__ )
__snake_case : List[Any] = conv_bias
__snake_case : Union[str, Any] = num_conv_pos_embeddings
__snake_case : str = num_conv_pos_embedding_groups
__snake_case : int = len(self.conv_dim )
__snake_case : int = num_hidden_layers
__snake_case : Dict = intermediate_size
__snake_case : Optional[int] = hidden_act
__snake_case : Tuple = num_attention_heads
__snake_case : Tuple = hidden_dropout
__snake_case : Optional[Any] = attention_dropout
__snake_case : Any = activation_dropout
__snake_case : Any = feat_proj_dropout
__snake_case : str = final_dropout
__snake_case : Optional[int] = layerdrop
__snake_case : Union[str, Any] = layer_norm_eps
__snake_case : Union[str, Any] = initializer_range
__snake_case : str = vocab_size
__snake_case : Optional[int] = num_clusters
__snake_case : Tuple = do_stable_layer_norm
__snake_case : str = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case : Dict = apply_spec_augment
__snake_case : Any = mask_time_prob
__snake_case : Optional[Any] = mask_time_length
__snake_case : str = mask_time_min_masks
__snake_case : int = mask_feature_prob
__snake_case : int = mask_feature_length
__snake_case : List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__snake_case : List[str] = num_codevectors_per_group
__snake_case : Tuple = num_codevector_groups
__snake_case : List[str] = contrastive_logits_temperature
__snake_case : Optional[Any] = feat_quantizer_dropout
__snake_case : Any = num_negatives
__snake_case : Any = codevector_dim
__snake_case : Tuple = proj_codevector_dim
__snake_case : Optional[int] = diversity_loss_weight
# ctc loss
__snake_case : Optional[int] = ctc_loss_reduction
__snake_case : Union[str, Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__snake_case : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__snake_case : Union[str, Any] = list(UpperCamelCase__ )
__snake_case : Optional[int] = list(UpperCamelCase__ )
__snake_case : Any = list(UpperCamelCase__ )
__snake_case : int = xvector_output_dim
@property
def lowercase_ ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
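# Illustrative check of the property above (inputs_to_logits_ratio upstream):
# the feature encoder's total downsampling factor is the product of the
# convolutional strides, so with the default strides one output frame covers
# 320 waveform samples (20 ms at 16 kHz).
import functools
import operator

default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, default_conv_stride, 1) == 320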
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
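# Hedged composition sketch using the upstream class names that the code above
# references internally (InstructBlipVisionConfig, InstructBlipQFormerConfig,
# InstructBlipConfig); every value is a library default, not a released
# checkpoint.
config = InstructBlipConfig(
    vision_config=InstructBlipVisionConfig().to_dict(),
    qformer_config=InstructBlipQFormerConfig().to_dict(),
    text_config=None,  # falls back to a default OPT config via CONFIG_MAPPING
    num_query_tokens=32,
)
print(config.num_query_tokens, config.vision_config.hidden_size)  # 32 1408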
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''T5Config'''
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Any ):
__snake_case : Any = jnp.zeros_like(__UpperCAmelCase )
__snake_case : Dict = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
__snake_case : Any = shifted_input_ids.at[:, 0].set(__UpperCAmelCase )
__snake_case : Tuple = jnp.where(shifted_input_ids == -1_00 , __UpperCAmelCase , __UpperCAmelCase )
return shifted_input_ids
class __SCREAMING_SNAKE_CASE ( a__):
"""simple docstring"""
__UpperCAmelCase = "mt5"
__UpperCAmelCase = MTaConfig
class __SCREAMING_SNAKE_CASE ( a__):
"""simple docstring"""
__UpperCAmelCase = "mt5"
__UpperCAmelCase = MTaConfig
class __SCREAMING_SNAKE_CASE ( a__):
"""simple docstring"""
__UpperCAmelCase = "mt5"
__UpperCAmelCase = MTaConfig
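# Standalone sketch of the decoder-input shift implemented above: targets move
# one position right, the start token fills slot 0, and any -100 label padding
# becomes the real pad id.
import jax.numpy as jnp

def _shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted = jnp.zeros_like(input_ids)
    shifted = shifted.at[:, 1:].set(input_ids[:, :-1])
    shifted = shifted.at[:, 0].set(decoder_start_token_id)
    return jnp.where(shifted == -100, pad_token_id, shifted)

labels = jnp.array([[42, 7, 1, -100]])
print(_shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0))  # [[ 0 42  7  1]]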
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
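# Migration sketch: the shim above only forwards to the image processor, so
# new code should construct the replacement directly (checkpoint name is
# illustrative).
from transformers import BeitImageProcessor

image_processor = BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224')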
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( lowercase_):
"""simple docstring"""
__UpperCAmelCase = '''luke'''
def __init__( self , _UpperCAmelCase=50_267 , _UpperCAmelCase=500_000 , _UpperCAmelCase=768 , _UpperCAmelCase=256 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
__snake_case : Dict = vocab_size
__snake_case : Optional[int] = entity_vocab_size
__snake_case : int = hidden_size
__snake_case : Optional[Any] = entity_emb_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Optional[int] = num_attention_heads
__snake_case : Optional[int] = hidden_act
__snake_case : List[str] = intermediate_size
__snake_case : Any = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Tuple = max_position_embeddings
__snake_case : Tuple = type_vocab_size
__snake_case : int = initializer_range
__snake_case : Dict = layer_norm_eps
__snake_case : Dict = use_entity_aware_attention
        __snake_case : Optional[Any] = classifier_dropout
import math
import os
import sys
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Union[str, Any] = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Tuple = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
lexicon.pop(__UpperCAmelCase )
__snake_case : Union[str, Any] = last_match_id
if math.loga(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case : Tuple = '0' + lexicon[curr_key]
__snake_case : Any = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Tuple = {'0': '0', '1': '1'}
__snake_case , __snake_case : Optional[int] = '', ''
__snake_case : str = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
__snake_case : Union[str, Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = os.path.getsize(__UpperCAmelCase )
__snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
__snake_case : Any = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : Tuple = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
__snake_case : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = read_file_binary(__UpperCAmelCase )
__snake_case : Tuple = compress_data(__UpperCAmelCase )
__snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
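# Readable sketch of the compression loop above: a standard LZW-style scheme
# over bit-strings, deobfuscated and simplified, so the emitted codes are not
# byte-identical to compress_data's output.
def _lzw_compress_bits(data_bits: str) -> str:
    lexicon = {'0': '0', '1': '1'}
    result, curr, index = [], '', len(lexicon)
    for bit in data_bits:
        curr += bit
        if curr not in lexicon:
            # Emit the code of the longest known prefix, then register the new
            # string under the next free code.
            result.append(lexicon[curr[:-1]])
            lexicon[curr] = bin(index)[2:]
            index += 1
            curr = bit
    if curr:
        result.append(lexicon[curr])
    return ''.join(result)

print(_lzw_compress_bits('0101010'))  # '01100'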
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = VideoToVideoSDPipeline
__UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
__UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
__UpperCAmelCase = PipelineTesterMixin.required_optional_params - {"latents"}
__UpperCAmelCase = False
# No `output_type`.
__UpperCAmelCase = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
])
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
__snake_case : Optional[int] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
torch.manual_seed(0 )
__snake_case : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
__snake_case : Dict = CLIPTextModel(lowerCAmelCase_ )
__snake_case : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__snake_case : str = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
__snake_case : str = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
if str(lowerCAmelCase_ ).startswith('mps' ):
__snake_case : Optional[Any] = torch.manual_seed(lowerCAmelCase_ )
else:
__snake_case : Optional[int] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
__snake_case : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def lowercase_ ( self ):
__snake_case : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__snake_case : Any = self.get_dummy_components()
__snake_case : List[str] = VideoToVideoSDPipeline(**lowerCAmelCase_ )
__snake_case : Optional[Any] = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__snake_case : Tuple = self.get_dummy_inputs(lowerCAmelCase_ )
__snake_case : Any = 'np'
__snake_case : Optional[Any] = sd_pipe(**lowerCAmelCase_ ).frames
__snake_case : Optional[Any] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__snake_case : str = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase_ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase_ , expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : Optional[int] = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__snake_case : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
__snake_case : int = torch.randn((1, 10, 3, 1_024, 576) , generator=lowerCAmelCase_ )
__snake_case : List[str] = video.to('cuda' )
__snake_case : Union[str, Any] = 'Spiderman is surfing'
__snake_case : Dict = pipe(lowerCAmelCase_ , video=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=3 , output_type='pt' ).frames
__snake_case : Any = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
from itertools import permutations
def UpperCAmelCase__( __UpperCAmelCase : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__snake_case : Any = [7, 11, 13, 17]
for i, test in enumerate(__UpperCAmelCase ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase__( __UpperCAmelCase : int = 10 ):
return sum(
int(''.join(map(__UpperCAmelCase , __UpperCAmelCase ) ) )
for num in permutations(range(__UpperCAmelCase ) )
if is_substring_divisible(__UpperCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
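# Quick sanity check of the divisibility rule encoded above, assuming the
# predicate keeps the name is_substring_divisible that solution() already
# calls; 1406357289 is the known example from the problem statement.
assert is_substring_divisible(tuple(int(d) for d in '1406357289'))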
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
# Function to print upper half of diamond (pyramid)
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(0 , __UpperCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(__UpperCAmelCase , 0 , -1 ):
for _ in range(__UpperCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(__UpperCAmelCase ) # upper half
reverse_floyd(__UpperCAmelCase ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
__magic_name__ = 1
while K:
__magic_name__ = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
__magic_name__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __SCREAMING_SNAKE_CASE ( _a):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = "arrow" , **_UpperCAmelCase , ):
super().__init__(
split=_A , features=_A , cache_dir=_A , keep_in_memory=_A , streaming=_A , **_A , )
__snake_case : str = load_from_cache_file
__snake_case : str = file_format
__snake_case : Dict = Spark(
df=_A , features=_A , cache_dir=_A , working_dir=_A , **_A , )
def lowercase_ ( self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__snake_case : Dict = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=_A , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
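# Hedged usage sketch: upstream this reader backs `Dataset.from_spark`. A live
# SparkSession is assumed, and row order may vary with partitioning.
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master('local[2]').getOrCreate()
df = spark.createDataFrame([('hello',), ('world',)], ['text'])
ds = Dataset.from_spark(df)
print(ds['text'])  # e.g. ['hello', 'world']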
from timeit import timeit
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Dict = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Tuple = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase__( ):
def do_benchmark(__UpperCAmelCase : int ) -> None:
__snake_case : Optional[Any] = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=__UpperCAmelCase )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=__UpperCAmelCase , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
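# Note: on Python 3.10+ the same population count is available as a built-in,
# which is usually faster than either pure-Python routine benchmarked above:
assert (25).bit_count() == 3  # 25 == 0b11001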
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {'''vocab_file''': '''prophetnet.tokenizer'''}
__magic_name__ = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
__magic_name__ = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
__magic_name__ = {
'''microsoft/xprophetnet-large-wiki100-cased''': 512,
}
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
__snake_case : str = collections.OrderedDict()
with open(UpperCAmelCase__ , 'r' , encoding='utf-8' ) as reader:
__snake_case : Optional[Any] = reader.readlines()
for index, token in enumerate(UpperCAmelCase__ ):
__snake_case : int = token.rstrip('\n' )
__snake_case : int = index
return vocab
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
__snake_case : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
__snake_case : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__snake_case : int = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(10 ):
__snake_case : Optional[int] = F"""[unused{i}]"""
__snake_case : Optional[int] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__snake_case : Dict = 12
__snake_case : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(__UpperCamelCase )
def __getstate__( self ):
__snake_case : Any = self.__dict__.copy()
__snake_case : Union[str, Any] = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : Tuple = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Dict = {}
__snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return ([0] * len(__UpperCamelCase )) + [1]
return ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase_ ( self ):
return len(self.sp_model ) + self.fairseq_offset
def lowercase_ ( self ):
__snake_case : str = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def lowercase_ ( self , _UpperCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__snake_case : Union[str, Any] = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase_ ( self , _UpperCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = ''.join(__UpperCamelCase ).replace(__UpperCamelCase , ' ' ).strip()
return out_string
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__snake_case : List[Any] = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , 'wb' ) as fi:
__snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__snake_case : Optional[int] = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch')(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy')(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch')(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow')(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX')(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow')(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers')(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken')(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy')(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip('test requires spacy')(test_case)
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark')(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark')(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow')(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local')(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged')(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote')(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        # Apply every decorator to every method whose name starts with "test".
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('test'):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
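
# Minimal usage sketch for `for_all_test_methods` (hypothetical test class; uses the
# `slow` and `local` markers defined earlier in this module):
#
#     @for_all_test_methods(slow, local)
#     class MyDatasetTests(unittest.TestCase):
#         def test_something(self):
#             ...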
import enum


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(enum.Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1E-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""")
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', F"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
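
# Minimal usage sketch for the `offline` context manager above:
#
#     with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
#         # any `requests` call made here hangs until its `timeout` expires
#         ...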
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
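
# Minimal usage sketch: run a block with the process CWD pointing at a scratch dir.
#
#     with set_current_working_directory_to_temp_dir():
#         Path('output.txt').write_text('scratch')  # lands in the temp dir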
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
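
# Minimal usage sketch (assumes `pa` is the `pyarrow` module imported at the top of
# this file):
#
#     with assert_arrow_memory_increases():
#         table = pa.table({'col': [1, 2, 3]})  # allocates Arrow buffers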
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
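
# Minimal usage sketch (hypothetical test function hitting the Hub):
#
#     @xfail_if_500_502_http_error
#     def test_push_to_hub():
#         ...  # an HTTPError starting with 500/502 becomes an xfail, not a failure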
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='stderr:')),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""")

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""")

    return result
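
# Minimal usage sketch (hypothetical command):
#
#     result = execute_subprocess_async(['python', '-c', 'print("hello")'], env=os.environ.copy())
#     assert 'hello' in '\n'.join(result.stdout)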
def pytest_xdist_worker_id():
    """Returns the numeric id of the current pytest-xdist worker ("gw3" -> 3, or 0 if not run in parallel)."""
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker = re.sub(r'^gw', '', worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
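
# Minimal usage sketch: derive a per-xdist-worker port for torch.distributed launches,
# so parallel pytest workers do not collide on the default master port.
#
#     master_port = get_torch_dist_unique_port()  # 29500 + worker id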
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 716 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('T')


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return F"""{self.data}"""


class LinkedStack(Generic[T]):
    """Linked-list based stack."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return '->'.join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
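
    # Minimal usage sketch of the linked-list stack above:
    stack = LinkedStack[int]()
    stack.push(1)
    stack.push(2)
    assert str(stack) == '2->1'
    assert stack.pop() == 2
    assert stack.peek() == 1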
| 679 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name='attention'):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[F"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
    o = params[F"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
    q = params[F"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
    v = params[F"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[F"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
        wi_1 = params[F"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
        wi = (wi_0, wi_1)
    else:
        wi = params[F"""{prefix}/layers_{i}/mlp/wi/kernel"""]
    wo = params[F"""{prefix}/layers_{i}/mlp/wo/kernel"""]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[F"""{prefix}/layers_{i}/{layer_name}/scale"""]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch.

    The PyTorch key names below follow the standard T5 state dict layout; they are
    reconstructed here, since the obfuscated dump dropped the left-hand-side keys.
    """
    old = traverse_util.flatten_dict(variables['target'])
    old = {'/'.join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/layers_0/mlp/wi_0/kernel' in old
    print('Split MLP:', split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, 'encoder', 'pre_attention_layer_norm')
        k, o, q, v = t5x_attention_lookup(old, i, 'encoder', 'attention')
        new[F"""encoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
        new[F"""encoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, 'encoder', 'pre_mlp_layer_norm')
        wi, wo = t5x_mlp_lookup(old, i, 'encoder', split_mlp_wi)
        new[F"""encoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
        if split_mlp_wi:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"""] = wi[0].T
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"""] = wi[1].T
        else:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi.weight"""] = wi.T
        new[F"""encoder.block.{i}.layer.1.DenseReluDense.wo.weight"""] = wo.T

    new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
        'encoder/relpos_bias/rel_embedding'
    ].T
    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_self_attention_layer_norm')
            k, o, q, v = t5x_attention_lookup(old, i, 'decoder', 'self_attention')
            new[F"""decoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_cross_attention_layer_norm')
            k, o, q, v = t5x_attention_lookup(old, i, 'decoder', 'encoder_decoder_attention')
            new[F"""decoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.v.weight"""] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_mlp_layer_norm')
            wi, wo = t5x_mlp_lookup(old, i, 'decoder', split_mlp_wi)
            new[F"""decoder.block.{i}.layer.2.layer_norm.weight"""] = layer_norm
            if split_mlp_wi:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"""] = wi[0].T
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"""] = wi[1].T
            else:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi.weight"""] = wi.T
            new[F"""decoder.block.{i}.layer.2.DenseReluDense.wo.weight"""] = wo.T

        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
            'decoder/relpos_bias/rel_embedding'
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if 'decoder/logits_dense/kernel' in old:
            new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.')
            state_dict['lm_head.weight'] = state_dict['shared.weight']

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in the model with the converted T5X params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    config = T5Config.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
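
# Example invocation (hypothetical paths):
#
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model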
| 717 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ['prompt']
    batch_params = ['prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'num_inference_steps',
        'generator',
        'latents',
        'guidance_scale',
        'frame_size',
        'output_type',
        'return_dict',
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp', num_train_timesteps=1_024, prediction_type='sample', use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def test_shap_e(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy')
        pipe = ShapEPipeline.from_pretrained('openai/shap-e')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            'a shark', generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 679 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 718 | import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
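
# Example invocation (hypothetical script name and paths):
#
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/t5/checkpoint \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model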
| 679 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-classification')
        self.tool.setup()
        self.remote_tool = load_tool('text-classification', remote=True)

    def test_exact_match_arg(self):
        result = self.tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool('That\'s quite cool', ['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg(self):
        result = self.tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text='That\'s quite cool', labels=['positive', 'negative'])
        self.assertEqual(result, 'positive')
| 719 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.', BERT_START_DOCSTRING, )
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
            F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n    the pooled output) e.g. for GLUE tasks. ', BERT_START_DOCSTRING, )
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)])
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                    total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
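
# Minimal usage sketch for the PABEE classifier (assumes a standard BERT checkpoint;
# `patience` controls how many consecutive agreeing layers trigger an early exit):
#
#     model = BertForSequenceClassificationWithPabee.from_pretrained('bert-base-uncased', num_labels=2)
#     model.bert.set_patience(3)
#     model.eval()
#     with torch.no_grad():
#         logits = model(input_ids=batch_input_ids, attention_mask=batch_attention_mask)[0]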
| 679 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
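
# Minimal usage sketch (hypothetical shapes; NHWC layout as used by the Flax UNet blocks):
#
#     import jax
#     block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#     sample = jnp.zeros((1, 64, 64, 32))
#     temb = jnp.zeros((1, 128))
#     params = block.init(jax.random.PRNGKey(0), sample, temb)
#     out, skips = block.apply(params, sample, temb)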
| 720 | def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input('''Enter a string ''').strip()
    isogram = is_isogram(input_str)
    print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 679 | 0 |
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
__magic_name__ = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 721 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = 'retribert'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 679 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 700 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_biogpt'] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 679 | 0 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 701 | import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_check_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'neck_hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act='silu',
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileViTModel,
            'image-classification': MobileViTForImageClassification,
            'image-segmentation': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileViT does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileViT does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
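        # post_process_semantic_segmentation resizes the logits to each requested (height, width)
        # before taking the per-pixel argmax, so the two calls below yield differently sized maps.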
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 679 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
__snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
        # one-character strings and special added tokens as tokens.
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
| 702 | def UpperCAmelCase__( __UpperCAmelCase : int | float | str ):
try:
__snake_case : int = float(__UpperCAmelCase )
except ValueError:
raise ValueError('Please enter a valid number' )
__snake_case : Any = decimal - int(__UpperCAmelCase )
if fractional_part == 0:
return int(__UpperCAmelCase ), 1
else:
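        # Scale the decimal by a power of ten (one per fractional digit) so it becomes an exact integer ratio.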
__snake_case : Tuple = len(str(__UpperCAmelCase ).split('.' )[1] )
__snake_case : Tuple = int(decimal * (10**number_of_frac_digits) )
__snake_case : List[Any] = 10**number_of_frac_digits
__snake_case , __snake_case : List[Any] = denominator, numerator
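        # Euclidean algorithm: when the remainder hits zero, the current divisor holds the GCD,
        # which is then used to reduce the fraction to lowest terms.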
while True:
__snake_case : Any = dividend % divisor
if remainder == 0:
break
__snake_case , __snake_case : Optional[int] = divisor, remainder
__snake_case , __snake_case : Union[str, Any] = numerator / divisor, denominator / divisor
return int(__UpperCAmelCase ), int(__UpperCAmelCase )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 679 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
__UpperCAmelCase = "CIDAS/clipseg-rd64-refined"
__UpperCAmelCase = "image_segmenter"
__UpperCAmelCase = CLIPSegForImageSegmentation
__UpperCAmelCase = ["image", "text"]
__UpperCAmelCase = ["image"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['vision'] )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
return self.pre_processor(text=[label] , images=[image] , padding=_UpperCAmelCase , return_tensors='pt' )
def lowercase_ ( self , _UpperCAmelCase ):
with torch.no_grad():
__snake_case : str = self.model(**_UpperCAmelCase ).logits
return logits
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = outputs.cpu().detach().numpy()
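        # The two masked assignments below threshold the array into a binary 0/1 mask,
        # which is then scaled to a 0-255 grayscale PIL image.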
__snake_case : Optional[int] = 0
__snake_case : int = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
| 703 | import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase_ ( self ):
if self.train_file is not None:
__snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case : List[str] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , _UpperCAmelCase ):
__snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
__snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
__snake_case : List[Any] = len(_UpperCAmelCase )
__snake_case : Union[str, Any] = len(features[0]['input_ids'] )
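        # Flatten each example's num_choices feature dicts into one long list so the tokenizer
        # pads all batch_size * num_choices sequences together.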
__snake_case : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
__snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )
__snake_case : Optional[Any] = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
__snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
__snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.intaa )
return batch
def UpperCAmelCase__( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__snake_case : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : Optional[int] = {}
if data_args.train_file is not None:
__snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : int = data_args.validation_file
__snake_case : int = data_args.train_file.split('.' )[-1]
__snake_case : Tuple = load_dataset(
__UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : str = [F"""ending{i}""" for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : Tuple = 'sent2'
if data_args.max_seq_length is None:
__snake_case : List[Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `max_seq_length` value'
                ' of 1024. If you would like to use a longer `max_seq_length` up to `tokenizer.model_max_length` you can'
                ' override this default with `--max_seq_length xxx`.' )
__snake_case : List[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
__snake_case : Union[str, Any] = examples[question_header_name]
__snake_case : Optional[int] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
]
# Flatten out
__snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
__snake_case : int = list(chain(*__UpperCAmelCase ) )
# Tokenize
__snake_case : Tuple = tokenizer(
__UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__UpperCAmelCase : int ):
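        # Accuracy of the argmax over the per-choice logits against the gold labels.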
__snake_case , __snake_case : Union[str, Any] = eval_predictions
__snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
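    # Defer the heavy framework imports above until a symbol is actually accessed.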
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 704 | import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
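        # Prepend the language-code token of the new target language to every encoded sequence.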
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
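        # 1 marks a special token (the language-code prefix and the trailing eos); 0 marks a sequence token.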
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
__snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def UpperCAmelCase__( __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
| 679 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
__magic_name__ = get_logger(__name__)
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=None ):
__snake_case : Tuple = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__' ):
setattr(self , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Dict = module._original_module if isinstance(_UpperCAmelCase , _PatchedModuleObj ) else module
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
__snake_case : Union[str, Any] = obj
__snake_case : List[str] = target
__snake_case : Tuple = new
__snake_case : Any = target.split('.' )[0]
__snake_case : Dict = {}
__snake_case : str = attrs or []
def __enter__( self ):
__snake_case : Tuple = self.target.split('.' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Tuple = import_module('.'.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
__snake_case : Optional[int] = getattr(self.obj , _UpperCAmelCase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(_UpperCAmelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
__snake_case : Union[str, Any] = obj_attr
# patch at top level
setattr(self.obj , _UpperCAmelCase , _PatchedModuleObj(_UpperCAmelCase , attrs=self.attrs ) )
__snake_case : str = getattr(self.obj , _UpperCAmelCase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(_UpperCAmelCase , _UpperCAmelCase , _PatchedModuleObj(getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , attrs=self.attrs ) )
__snake_case : Any = getattr(_UpperCAmelCase , _UpperCAmelCase )
# finally set the target attribute
setattr(_UpperCAmelCase , _UpperCAmelCase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
__snake_case : Tuple = getattr(import_module('.'.join(_UpperCAmelCase ) ) , _UpperCAmelCase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , _UpperCAmelCase ) is attr_value:
__snake_case : List[Any] = getattr(self.obj , _UpperCAmelCase )
setattr(self.obj , _UpperCAmelCase , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
__snake_case : Optional[Any] = globals()['__builtins__'][target_attr]
setattr(self.obj , _UpperCAmelCase , self.new )
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" )
def __exit__( self , *_UpperCAmelCase ):
for attr in list(self.original ):
setattr(self.obj , _UpperCAmelCase , self.original.pop(_UpperCAmelCase ) )
def lowercase_ ( self ):
self.__enter__()
self._active_patches.append(self )
def lowercase_ ( self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 705 | def UpperCAmelCase__( __UpperCAmelCase : list ):
__snake_case : List[Any] = len(__UpperCAmelCase )
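    # Odd-even transposition ("brick") sort: passes alternate between even- and odd-indexed
    # adjacent pairs, and n passes are enough to sort a list of length n.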
for _ in range(__UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__snake_case , __snake_case : int = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__magic_name__ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 679 | 0 |
def UpperCAmelCase__( __UpperCAmelCase : list ):
def merge(__UpperCAmelCase : list , __UpperCAmelCase : list ) -> list:
def _merge():
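            # Lazily yield the smaller head of the two sorted runs, then drain whatever remains.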
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(__UpperCAmelCase ) <= 1:
return collection
__snake_case : Optional[int] = len(__UpperCAmelCase ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 706 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
__snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
        # one-character strings and special added tokens as tokens
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "efficientnet"
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 600 , _UpperCAmelCase = 2.0 , _UpperCAmelCase = 3.1 , _UpperCAmelCase = 8 , _UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , _UpperCAmelCase = [32, 16, 24, 40, 80, 112, 192] , _UpperCAmelCase = [16, 24, 40, 80, 112, 192, 320] , _UpperCAmelCase = [] , _UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , _UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , _UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , _UpperCAmelCase = 0.25 , _UpperCAmelCase = "swish" , _UpperCAmelCase = 2_560 , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0.02 , _UpperCAmelCase = 0.001 , _UpperCAmelCase = 0.99 , _UpperCAmelCase = 0.5 , _UpperCAmelCase = 0.2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : List[Any] = num_channels
__snake_case : Optional[Any] = image_size
__snake_case : Optional[int] = width_coefficient
__snake_case : Optional[Any] = depth_coefficient
__snake_case : Tuple = depth_divisor
__snake_case : Dict = kernel_sizes
__snake_case : List[Any] = in_channels
__snake_case : Dict = out_channels
__snake_case : Optional[Any] = depthwise_padding
__snake_case : Tuple = strides
__snake_case : Optional[Any] = num_block_repeats
__snake_case : Tuple = expand_ratios
__snake_case : Optional[Any] = squeeze_expansion_ratio
__snake_case : Any = hidden_act
__snake_case : Optional[Any] = hidden_dim
__snake_case : Dict = pooling_type
__snake_case : List[str] = initializer_range
__snake_case : Optional[Any] = batch_norm_eps
__snake_case : int = batch_norm_momentum
__snake_case : Dict = dropout_rate
__snake_case : str = drop_connect_rate
__snake_case : List[Any] = sum(_UpperCAmelCase ) * 4
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = version.parse("1.11")
@property
def lowercase_ ( self ):
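        # ONNX graph input: a single image tensor in NCHW layout with named dynamic axes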
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowercase_ ( self ):
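        # absolute tolerance used when validating the exported ONNX model against PyTorch outputs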
return 1E-5
| 707 | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
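        # Arrow storage schema: one string field per language, sorted for a deterministic layout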
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
| 679 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__magic_name__ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
__magic_name__ = {
'''allenai/led-base-16384''': 16_384,
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = LEDTokenizer
__UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="replace" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=False , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase , **_UpperCAmelCase , )
__snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
__snake_case : List[str] = getattr(_UpperCAmelCase , pre_tok_state.pop('type' ) )
__snake_case : Dict = add_prefix_space
__snake_case : int = pre_tok_class(**_UpperCAmelCase )
__snake_case : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case : int = 'post_processor'
__snake_case : Any = getattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
if tokenizer_component_instance:
__snake_case : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__snake_case : Dict = tuple(state['sep'] )
if "cls" in state:
__snake_case : int = tuple(state['cls'] )
__snake_case : Optional[int] = False
if state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
__snake_case : Optional[int] = add_prefix_space
__snake_case : Optional[Any] = True
if state.get('trim_offsets' , _UpperCAmelCase ) != trim_offsets:
__snake_case : Dict = trim_offsets
__snake_case : List[str] = True
if changes_to_apply:
__snake_case : List[Any] = getattr(_UpperCAmelCase , state.pop('type' ) )
__snake_case : Union[str, Any] = component_class(**_UpperCAmelCase )
setattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowercase_ ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else value
__snake_case : Union[str, Any] = value
def lowercase_ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
__snake_case : Union[str, Any] = kwargs.get('is_split_into_words' , _UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
__snake_case : Union[str, Any] = kwargs.get('is_split_into_words' , _UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : Union[str, Any] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
__snake_case : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
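        # LED, like BART, does not make use of token type ids, so the returned mask is all zeros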
__snake_case : Union[str, Any] = [self.sep_token_id]
__snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = PaddingStrategy.DO_NOT_PAD , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
__snake_case : Any = super()._pad(
encoded_inputs=_UpperCAmelCase , max_length=_UpperCAmelCase , padding_strategy=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
__snake_case : List[str] = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case : str = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__snake_case : Optional[Any] = len(encoded_inputs['global_attention_mask'] ) != len(_UpperCAmelCase )
if needs_to_be_padded:
__snake_case : Dict = len(_UpperCAmelCase ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__snake_case : Dict = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case : Optional[int] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 708 | from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # estimated total cost (g plus heuristic) of the cell
    cell = [[f, g, x, y]]  # open list of [f, g, x, y] entries
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # choose the least costly action so as to move closer to the goal
            # sort + reverse + pop emulates a min-priority queue keyed on f
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 679 | 0 |
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c,
        # which gives b = n * (n - 2 * a) / (2 * (n - a))
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 709 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
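        # decoder-only text backbones (e.g. OPT) take a different generation path than
        # encoder-decoder ones (e.g. Flan-T5)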
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
| 679 | 0 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ):
__snake_case : List[str] = parent
__snake_case : Tuple = batch_size
__snake_case : str = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : List[str] = last_hidden_size
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = conv_kernel_size
__snake_case : int = output_stride
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : str = use_labels
__snake_case : Optional[Any] = is_training
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
__snake_case : Union[str, Any] = scope
def lowercase_ ( self ):
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.num_labels
__snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case : Any = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = MobileViTModelTester(self )
__snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[Any] = outputs.hidden_states
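            # MobileViT returns one feature map per encoder stage, and the architecture has 5 stages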
__snake_case : str = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : Optional[Any] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
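        # without target_sizes the maps keep the logits' spatial resolution (here 32x32)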
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 710 | import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 679 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = BartphoTokenizer
__UpperCAmelCase = False
__UpperCAmelCase = True
def lowercase_ ( self ):
super().setUp()
__snake_case : Tuple = ['▁This', '▁is', '▁a', '▁t', 'est']
__snake_case : Dict = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__snake_case : str = {'unk_token': '<unk>'}
__snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
__snake_case : List[str] = BartphoTokenizer(_UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = 'This is a là test'
__snake_case : Optional[Any] = 'This is a<unk><unk> test'
return input_text, output_text
def lowercase_ ( self ):
__snake_case : List[Any] = BartphoTokenizer(_UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
__snake_case : Tuple = 'This is a là test'
__snake_case : Tuple = '▁This ▁is ▁a ▁l à ▁t est'.split()
__snake_case : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : List[str] = tokens + [tokenizer.unk_token]
__snake_case : Dict = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) | 711 | import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    # Read the file as raw bytes and return them as one long string of '0'/'1'
    # characters (8 bits per byte).
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F"""{dat:08b}"""
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    # Replace the matched key with its one-bit extensions; once `index` reaches a
    # power of two, every stored code needs one more leading bit.
    lexicon.pop(curr_string)
    lexicon[curr_string + '0'] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + '1'] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    # LZ-style compression: emit the code of the longest known prefix, then
    # grow the lexicon with its extensions.
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    # Prefix the compressed stream with the original file length, encoded
    # Elias-gamma style: (length - 1) zeros followed by the binary length.
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            # pad the final byte with a single 1 followed by 0s so a decoder can
            # find the true end of the bit stream
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
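    # Usage sketch (hypothetical file names): python this_script.py input.bin output.lz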
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 712 | from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    # Project Euler 43: d2d3d4 must be divisible by 2, d3d4d5 by 3, d4d5d6 by 5,
    # and the remaining 3-digit substrings by 7, 11, 13 and 17 respectively.
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
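# solution() sums every 0-9 pandigital number (generated as digit permutations)
# whose substrings satisfy the checks above.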
def solution(n: int = 10) -> int:
    return sum(
        int(''.join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 679 | 0 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = (CMStochasticIterativeScheduler,)
__UpperCAmelCase = 1_0
def lowercase_ ( self , **_UpperCAmelCase ):
__snake_case : str = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
config.update(**_UpperCAmelCase )
return config
def lowercase_ ( self ):
__snake_case : List[Any] = 10
__snake_case : Dict = self.get_scheduler_config()
__snake_case : Tuple = self.scheduler_classes[0](**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
__snake_case : str = scheduler.timesteps[0]
__snake_case : str = scheduler.timesteps[1]
__snake_case : Dict = self.dummy_sample
__snake_case : List[str] = 0.1 * sample
__snake_case : Optional[Any] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
__snake_case : List[str] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase_ ( self ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def lowercase_ ( self ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Dict = self.scheduler_classes[0]
__snake_case : List[str] = self.get_scheduler_config()
__snake_case : List[Any] = scheduler_class(**_UpperCAmelCase )
__snake_case : Union[str, Any] = 1
scheduler.set_timesteps(_UpperCAmelCase )
__snake_case : Dict = scheduler.timesteps
__snake_case : str = torch.manual_seed(0 )
__snake_case : List[str] = self.dummy_model()
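        # start from a fixed sample scaled to the scheduler's initial noise sigma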
__snake_case : int = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_UpperCAmelCase ):
# 1. scale model input
__snake_case : int = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict noise residual
__snake_case : Tuple = model(_UpperCAmelCase , _UpperCAmelCase )
# 3. predict previous sample x_t-1
__snake_case : Any = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
__snake_case : str = pred_prev_sample
__snake_case : List[str] = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : str = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 192.7614 ) < 1E-2
assert abs(result_mean.item() - 0.2510 ) < 1E-3
def lowercase_ ( self ):
__snake_case : Optional[int] = self.scheduler_classes[0]
__snake_case : List[str] = self.get_scheduler_config()
__snake_case : Optional[int] = scheduler_class(**_UpperCAmelCase )
__snake_case : Union[str, Any] = [106, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
__snake_case : Any = scheduler.timesteps
__snake_case : Union[str, Any] = torch.manual_seed(0 )
__snake_case : str = self.dummy_model()
__snake_case : str = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__snake_case : Optional[Any] = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict noise residual
__snake_case : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
# 3. predict previous sample x_t-1
__snake_case : List[Any] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
__snake_case : Tuple = pred_prev_sample
__snake_case : Optional[Any] = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : Dict = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 347.6357 ) < 1E-2
assert abs(result_mean.item() - 0.4527 ) < 1E-3
def lowercase_ ( self ):
__snake_case : List[str] = self.scheduler_classes[0]
__snake_case : Any = self.get_scheduler_config()
__snake_case : Any = scheduler_class(**_UpperCAmelCase )
__snake_case : str = [39, 30, 12, 15, 0]
with self.assertRaises(_UpperCAmelCase , msg='`timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = self.scheduler_classes[0]
__snake_case : int = self.get_scheduler_config()
__snake_case : int = scheduler_class(**_UpperCAmelCase )
__snake_case : List[str] = [39, 30, 12, 1, 0]
__snake_case : Optional[Any] = len(_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.scheduler_classes[0]
__snake_case : Dict = self.get_scheduler_config()
__snake_case : str = scheduler_class(**_UpperCAmelCase )
__snake_case : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _UpperCAmelCase , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
| 713 | # Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()
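# Example: floyd(3) prints (trailing spaces omitted):
#   *
#  * *
# * * *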
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')
def pretty_print(n):
    if n <= 0:
        print(' ... .... nothing printing :(')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input('''enter the number and see the magic : '''))
        print()
        pretty_print(user_number)
        K = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 679 | 0 |
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase = "" , _UpperCAmelCase = False ):
# Mapping from the first character of the prefix of the node
__snake_case : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
__snake_case : str = is_leaf
__snake_case : str = prefix
def lowercase_ ( self , _UpperCAmelCase ):
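        # Compute the longest common prefix of the node's prefix and the word, and return
        # (matching prefix, remaining prefix of the node, remaining word)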
__snake_case : Any = 0
for q, w in zip(self.prefix , _UpperCAmelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase_ ( self , _UpperCAmelCase ):
for word in words:
self.insert(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
__snake_case : str = True
        # Case 2: No outgoing edge shares a first character with the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
__snake_case : int = RadixNode(prefix=_UpperCAmelCase , is_leaf=_UpperCAmelCase )
else:
__snake_case : Dict = self.nodes[word[0]]
__snake_case : str = incoming_node.match(
_UpperCAmelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(_UpperCAmelCase )
            # Case 4: The word is greater than or equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
__snake_case : int = remaining_prefix
__snake_case : Optional[int] = self.nodes[matching_string[0]]
__snake_case : int = RadixNode(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Optional[int] = aux_node
if remaining_word == "":
__snake_case : Tuple = True
else:
self.nodes[matching_string[0]].insert(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = self.nodes.get(word[0] , _UpperCAmelCase )
if not incoming_node:
return False
else:
__snake_case : int = incoming_node.match(
_UpperCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Union[str, Any] = self.nodes.get(word[0] , _UpperCAmelCase )
if not incoming_node:
return False
else:
__snake_case : Any = incoming_node.match(
_UpperCAmelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(_UpperCAmelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
__snake_case : int = list(self.nodes.values() )[0]
__snake_case : Optional[Any] = merging_node.is_leaf
self.prefix += merging_node.prefix
__snake_case : Any = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
__snake_case : Union[str, Any] = False
# If there is 1 edge, we merge it with its child
else:
__snake_case : List[str] = list(incoming_node.nodes.values() )[0]
__snake_case : List[Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
__snake_case : Tuple = merging_node.nodes
return True
def lowercase_ ( self , _UpperCAmelCase = 0 ):
if self.prefix != "":
print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def UpperCAmelCase__( ):
__snake_case : Optional[int] = 'banana bananas bandana band apple all beast'.split()
__snake_case : Tuple = RadixNode()
root.insert_many(__UpperCAmelCase )
assert all(root.find(__UpperCAmelCase ) for word in words )
assert not root.find('bandanas' )
assert not root.find('apps' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def pytests() -> None:
    assert test_trie()
def main() -> None:
    root = RadixNode()
    words = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(words)
    print('Words:', words)
    print('Tree:')
    root.print_tree()
if __name__ == "__main__":
main()
| 714 | from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
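# Worked example of the `number &= number - 1` trick (a sketch): for number = 0b1011
# (11), the loop computes 1011 & 1010 -> 1010, then 1010 & 1001 -> 1000, then
# 1000 & 0111 -> 0000, i.e. one iteration per set bit (3 here) instead of one per
# bit position as in the modulo-based version below.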
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = 'import __main__ as z'
        print(F"""Benchmark when {number = }:""")
        print(F"""{get_set_bits_count_using_modulo_operator(number) = }""")
        # Interpolate the current number instead of a hardcoded 25, so each loop
        # value below is actually what gets benchmarked.
        timing = timeit(F"""z.get_set_bits_count_using_modulo_operator({number})""", setup=setup)
        print(F"""timeit() runs in {timing} seconds""")
        print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(number) = }""")
        timing = timeit(
            F"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""", setup=setup, )
        print(F"""timeit() runs in {timing} seconds""")
    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 679 | 0 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = logging.get_logger()
# the current default level is logging.WARNING
__snake_case : Any = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Any = logging.get_verbosity()
__snake_case : Dict = logging.get_logger('transformers.models.bart.tokenization_bart' )
__snake_case : List[str] = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(_UpperCAmelCase ) as cl:
logger.warning(_UpperCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(_UpperCAmelCase ) as cl:
logger.warning(_UpperCAmelCase )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(_UpperCAmelCase ) as cl:
logger.warning(_UpperCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(_UpperCAmelCase )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def lowercase_ ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
__snake_case : Optional[Any] = logging.get_logger('transformers.models.bart.tokenization_bart' )
__snake_case : Optional[Any] = os.getenv('TRANSFORMERS_VERBOSITY' , _UpperCAmelCase )
__snake_case : List[Any] = logging.log_levels[env_level_str]
__snake_case : Optional[int] = logging.get_verbosity()
self.assertEqual(
_UpperCAmelCase , _UpperCAmelCase , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
__snake_case : Union[str, Any] = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def lowercase_ ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
__snake_case : Optional[int] = logging.logging.getLogger()
with CaptureLogger(_UpperCAmelCase ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def lowercase_ ( self ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
__snake_case : str = logging.get_logger('transformers.models.bart.tokenization_bart' )
__snake_case : Dict = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(_UpperCAmelCase ) as cl:
logger.warning_advice(_UpperCAmelCase )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(_UpperCAmelCase ) as cl:
logger.warning_advice(_UpperCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
def UpperCAmelCase__( ):
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 715 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""" )
    return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires soundfile>=0.12.0: \'pip install \"soundfile>=0.12.0\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase__( __UpperCAmelCase : int ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
if not _run_local_tests or _run_local_tests == 0:
__snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : int ):
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( *__UpperCAmelCase : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCAmelCase ) and name.startswith('test' ):
for decorator in decorators:
__snake_case : Optional[Any] = decorator(__UpperCAmelCase )
setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass
class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
__snake_case : Optional[Any] = requests.Session().request
def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case : int = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
__snake_case : str = timeout
try:
return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case : Any = url
__snake_case : Union[str, Any] = e.args[0]
__snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
__snake_case : str = (max_retry_error,)
raise
def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
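# Usage sketch (the context manager above is name-mangled; `offline` is an assumed
# name, the enum members are the ones defined above):
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       requests.get('https://huggingface.co')  # raises requests.ConnectionError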
@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
__snake_case : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
try:
os.chdir(__UpperCAmelCase )
yield
finally:
os.chdir(__UpperCAmelCase )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
try:
return func(*__UpperCAmelCase , **__UpperCAmelCase )
except HTTPError as err:
if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
pytest.xfail(str(__UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCAmelCase )
class _RunOutput:
    """Holds the return code and captured output of a finished subprocess."""
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is that
    # no data will be seen until the process is done, so if it hangs there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def pytest_xdist_worker_id():
    # Returns 0 when run without pytest-xdist; otherwise the numeric part of e.g. 'gw3'.
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker = re.sub(r'^gw', '', worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port():
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
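# Example (a sketch): with PYTEST_XDIST_WORKER=gw3, pytest_xdist_worker_id() returns 3
# and the port helper returns 29_500 + 3 = 29_503, so each xdist worker gets its own
# distributed-training port.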
| 679 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__( __UpperCAmelCase : str ):
return EnvironmentCommand()
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
return EnvironmentCommand(args.accelerate_config_file )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
@staticmethod
def lowercase_ ( _UpperCAmelCase ):
__snake_case : Any = parser.add_parser('env' )
download_parser.set_defaults(func=_UpperCAmelCase )
download_parser.add_argument(
'--accelerate-config_file' , default=_UpperCAmelCase , help='The accelerate config file to use for the default values in the launching script.' , )
download_parser.set_defaults(func=_UpperCAmelCase )
def __init__( self , _UpperCAmelCase , *_UpperCAmelCase ):
__snake_case : List[str] = accelerate_config_file
def lowercase_ ( self ):
__snake_case : Optional[int] = 'not installed'
if is_safetensors_available():
import safetensors
__snake_case : List[Any] = safetensors.__version__
elif importlib.util.find_spec('safetensors' ) is not None:
import safetensors
__snake_case : str = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
__snake_case : List[Any] = 'not installed'
__snake_case : Tuple = 'not found'
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__snake_case : List[str] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_UpperCAmelCase ):
__snake_case : List[Any] = load_config_from_file(self._accelerate_config_file ).to_dict()
__snake_case : int = (
'\n'.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(_UpperCAmelCase , _UpperCAmelCase )
else F"""\t{accelerate_config}"""
)
__snake_case : Union[str, Any] = 'not installed'
__snake_case : Union[str, Any] = 'NA'
if is_torch_available():
import torch
__snake_case : str = torch.__version__
__snake_case : Union[str, Any] = torch.cuda.is_available()
__snake_case : Any = 'not installed'
__snake_case : Union[str, Any] = 'NA'
if is_tf_available():
import tensorflow as tf
__snake_case : Tuple = tf.__version__
try:
# deprecated in v2.1
__snake_case : Dict = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__snake_case : Tuple = bool(tf.config.list_physical_devices('GPU' ) )
__snake_case : List[Any] = 'not installed'
__snake_case : Any = 'not installed'
__snake_case : int = 'not installed'
__snake_case : Union[str, Any] = 'NA'
if is_flax_available():
import flax
import jax
import jaxlib
__snake_case : List[Any] = flax.__version__
__snake_case : Optional[int] = jax.__version__
__snake_case : str = jaxlib.__version__
__snake_case : Optional[Any] = jax.lib.xla_bridge.get_backend().platform
__snake_case : int = {
'`transformers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Huggingface_hub version': huggingface_hub.__version__,
'Safetensors version': F"""{safetensors_version}""",
'Accelerate version': F"""{accelerate_version}""",
'Accelerate config': F"""{accelerate_config_str}""",
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Tensorflow version (GPU?)': F"""{tf_version} ({tf_cuda_available})""",
'Flax version (CPU?/GPU?/TPU?)': F"""{flax_version} ({jax_backend})""",
'Jax version': F"""{jax_version}""",
'JaxLib version': F"""{jaxlib_version}""",
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(_UpperCAmelCase ) )
return info
@staticmethod
def lowercase_ ( _UpperCAmelCase ):
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 716 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class Node(Generic[T]):
    """A node of the singly linked list backing the stack."""
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None
    def __str__(self) -> str:
        return F"""{self.data}"""
class LinkedStack(Generic[T]):
    """LIFO stack implemented on top of a singly linked list."""
    def __init__(self):
        self.top: Node[T] | None = None
    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__(self) -> str:
        return "->".join([str(item) for item in self])
    def __len__(self) -> int:
        return len(tuple(iter(self)))
    def is_empty(self) -> bool:
        return self.top is None
    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data
    def clear(self) -> None:
        self.top = None
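# Usage sketch (class names as restored above):
#   stack: LinkedStack[int] = LinkedStack()
#   stack.push(1); stack.push(2)
#   assert str(stack) == '2->1' and stack.peek() == 2
#   assert stack.pop() == 2 and len(stack) == 1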
if __name__ == "__main__":
from doctest import testmod
testmod()
| 679 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."})
__UpperCAmelCase = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."})
__UpperCAmelCase = field(default=2 , metadata={"help": "Batch size for training."})
__UpperCAmelCase = field(default=2 , metadata={"help": "Batch size for evaluation."})
__UpperCAmelCase = field(default=0.1 , metadata={"help": "Value of weight decay."})
__UpperCAmelCase = field(
default=1_0_0_0_0 , metadata={"help": "Size of buffer used to shuffle streaming dataset."})
    __UpperCAmelCase = field(default=2E-4 , metadata={"help": "Learning rate for training."})
__UpperCAmelCase = field(default="cosine" , metadata={"help": "Learning rate."})
__UpperCAmelCase = field(
default=7_5_0 , metadata={"help": "Number of warmup steps in the learning rate schedule."})
__UpperCAmelCase = field(
default=1_6 , metadata={"help": "Number of gradient accumulation steps."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Use gradient checkpointing to reduce memory footprint."})
__UpperCAmelCase = field(default=5_0_0_0_0 , metadata={"help": "Maximum number of training steps."})
__UpperCAmelCase = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."})
__UpperCAmelCase = field(default=1_0_2_4 , metadata={"help": "Sequence lengths used for training."})
__UpperCAmelCase = field(default=1 , metadata={"help": "Training seed."})
__UpperCAmelCase = field(
default=1_0_2_4 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "States path if the training should continue from a checkpoint folder."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "If True the data is pretokenized."})
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."})
__UpperCAmelCase = field(default=2 , metadata={"help": "Batch size used for evaluation."})
__UpperCAmelCase = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."})
__UpperCAmelCase = field(default=1_0_2_4 , metadata={"help": "Length of sequences to be evaluated."})
__UpperCAmelCase = field(default=1 , metadata={"help": "Random seed used for evaluation."})
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Number of workers used for code evaluation."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Sample from the language model's output distribution."})
__UpperCAmelCase = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."})
__UpperCAmelCase = field(default=2_5_6 , metadata={"help": "Maximum number of newly generated tokens."})
__UpperCAmelCase = field(default=0 , metadata={"help": "Top-k parameter used for generation."})
__UpperCAmelCase = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."})
__UpperCAmelCase = field(default=1_0 , metadata={"help": "Number of generations to run in parallel."})
__UpperCAmelCase = field(
default=2_0_0 , metadata={"help": "Number of completions to generate for each sample."})
__UpperCAmelCase = field(default=1 , metadata={"help": "Random seed used for evaluation."})
    __UpperCAmelCase = field(
        default="eval_results.json" , metadata={"help": "Name of the output file storing the evaluation results."})
__UpperCAmelCase = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"})
__UpperCAmelCase = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
__UpperCAmelCase = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."})
__UpperCAmelCase = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."})
__UpperCAmelCase = field(
default=1_0_0_0_0_0 , metadata={"help": "Number of files to save per JSON output file."})
__UpperCAmelCase = field(default="content" , metadata={"help": "Column containing text data to process."})
__UpperCAmelCase = field(
default=1_0_0_0 , metadata={"help": "Maximum line length in file, otherwise file is filtered."})
__UpperCAmelCase = field(
default=1_0_0 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."})
__UpperCAmelCase = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."})
__UpperCAmelCase = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."})
__UpperCAmelCase = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "If True, near-duplicate samples are removed."})
__UpperCAmelCase = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."})
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."})
__UpperCAmelCase = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."})
__UpperCAmelCase = field(default="content" , metadata={"help": "Column containing text data to process."})
__UpperCAmelCase = field(default=2_0_0_0_0_0 , metadata={"help": "Number of examples to train tokenizer on."})
    __UpperCAmelCase = field(
        default=3_2_7_6_8 , metadata={"help": "Vocabulary size of the new tokenizer."})
__UpperCAmelCase = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Push saved tokenizer to the hub."})
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."})
__UpperCAmelCase = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."})
    __UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Number of workers used for pretokenization."})
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."})
__UpperCAmelCase = field(default="codeparrot" , metadata={"help": "Name of the created model."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Push saved tokenizer to the hub."})
| 717 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
__snake_case : Tuple = self.dummy_prior
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : str = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
__snake_case : Optional[int] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
        # NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df: pyspark.sql.DataFrame, partition_order: list) -> list:
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F"""SPARK_PARTITION_ID() = {part_id}""").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase__( ) -> Union[str, Any]:
__snake_case : Dict = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__snake_case : str = spark.range(1_00 ).repartition(1 )
__snake_case : Union[str, Any] = Spark(__UpperCAmelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
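    # Sanity arithmetic (a sketch): 100 rows * 8 bytes per int64 = 800 bytes, and
    # 800 bytes / 16 bytes-per-shard = 50 partitions, matching the assert above.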
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase__( ) -> List[str]:
__snake_case : Union[str, Any] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__snake_case : List[str] = spark.range(10 ).repartition(2 )
__snake_case : Any = [1, 0]
__snake_case : Dict = _generate_iterable_examples(__UpperCAmelCase , __UpperCAmelCase ) # Reverse the partitions.
__snake_case : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(__UpperCAmelCase , __UpperCAmelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
__snake_case : Union[str, Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase__( ) -> List[Any]:
__snake_case : str = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__snake_case : Dict = spark.range(10 ).repartition(1 )
__snake_case : List[str] = SparkExamplesIterable(__UpperCAmelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__UpperCAmelCase ):
assert row_id == F"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase__( ) -> Dict:
__snake_case : Any = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__snake_case : Tuple = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
__snake_case : List[Any] = lambda __UpperCAmelCase : x.reverse()
__snake_case : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(__UpperCAmelCase , [2, 1, 0] )
__snake_case : int = SparkExamplesIterable(__UpperCAmelCase ).shuffle_data_sources(__UpperCAmelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__UpperCAmelCase ):
__snake_case : List[str] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase__( ) -> Optional[int]:
__snake_case : List[str] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__snake_case : Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
__snake_case : List[str] = SparkExamplesIterable(__UpperCAmelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(__UpperCAmelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(__UpperCAmelCase ):
__snake_case : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
__snake_case : Any = SparkExamplesIterable(__UpperCAmelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
__snake_case : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(__UpperCAmelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(__UpperCAmelCase ):
__snake_case : Union[str, Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase__( ) -> Optional[Any]:
__snake_case : Any = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
__snake_case : int = spark.range(1_00 ).repartition(1 )
__snake_case : Optional[int] = Spark(__UpperCAmelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 718 | import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
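# Example invocation (a sketch; the script filename and all paths are placeholders):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_model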
| 679 | 0 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not be greater than number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(F"""{start_bytes}-{end_bytes}""" )
    return allocation_list
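# Examples (a sketch; the function name above is a descriptive choice, nothing else
# references it):
#   allocation_num(16, 4) -> ['1-4', '5-8', '9-12', '13-16']
#   allocation_num(10, 3) -> ['1-3', '4-6', '7-10']  # last partition absorbs the remainder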
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
__snake_case : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
__snake_case : str = 0
__snake_case : List[str] = 0
__snake_case : int = 0
__snake_case : Tuple = 0
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = threshold
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = patience
def lowercase_ ( self ):
__snake_case : Dict = 0
__snake_case : Dict = 0
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
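# Summary of the early-exit mechanism above: during training every layer's
# classification head is scored; at inference with `patience > 0`, the loop breaks
# as soon as `patience` consecutive intermediate heads agree on the prediction
# (or, for regression, stay within `regression_threshold`), so easy inputs use
# fewer layers than hard ones.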
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
__snake_case : int = nn.Dropout(config.hidden_dropout_prob )
__snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
__snake_case : List[str] = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : int = (logits[-1],)
if labels is not None:
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
__snake_case : List[str] = MSELoss()
__snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case : int = (total_loss / total_weights,) + outputs
return outputs
| 679 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''PerceiverFeatureExtractor''']
__magic_name__ = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
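# A minimal sketch of the lazy-import mechanism relied on above (an assumed
# equivalent, not transformers' actual `_LazyModule` implementation): PEP 562
# module-level __getattr__ defers the submodule import until a symbol is
# first accessed. Placed in a package __init__.py, it behaves the same way.
import importlib

_lazy_structure = {"configuration_perceiver": ["PerceiverConfig"]}

def __getattr__(name):  # invoked only when normal attribute lookup fails
    for submodule, symbols in _lazy_structure.items():
        if name in symbols:
            module = importlib.import_module(f"{__name__}.{submodule}")
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")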
 | 720 |
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
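# Quick sanity checks for the predicate above, restated under the name
# `is_isogram` that the __main__ block already assumes (the non-alphabetic
# input validation is omitted here for brevity):
def is_isogram(string: str) -> bool:
    return len(string) == len(set(string.lower()))

assert is_isogram("Uncopyrightable")  # 15 distinct letters
assert not is_isogram("allowance")    # 'a' and 'l' repeat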
| 679 | 0 |
def UpperCAmelCase__( __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float , ):
__snake_case : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be non-negative' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('Relative densities cannot be greater than one' )
else:
__snake_case : Any = 1 - (matter_density + radiation_density + dark_energy)
__snake_case : Dict = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__snake_case : List[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
__magic_name__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
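# Note: the function above evaluates the Friedmann equation
#     H(z) = H0 * sqrt(Om_r*(1+z)**4 + Om_m*(1+z)**3 + Om_k*(1+z)**2 + Om_de)
# with curvature Om_k = 1 - (Om_r + Om_m + Om_de). A handy sanity check, using
# the same (assumed) keyword names as the demo call: at redshift 0 in a flat
# universe the densities sum to one, so H(0) reduces to the Hubble constant:
# hubble_parameter(hubble_constant=68.3, radiation_density=0.0,
#                  matter_density=0.3, dark_energy=0.7, redshift=0) == 68.3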
 | 721 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "retribert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = share_encoders
__snake_case : Optional[Any] = projection_dim
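# Hypothetical usage sketch for the config above (`RetriBertConfig` is the
# public name transformers exposes for it; values below are illustrative):
# from transformers import RetriBertConfig
# config = RetriBertConfig(hidden_size=512, num_attention_heads=8)
# assert config.model_type == "retribert"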
| 679 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.0: \'pip install \"soundfile>=0.12.0\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
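# Hypothetical usage of the decorator factory above (assuming it is bound to
# a name such as `require_spacy_model`): the test is skipped unless the given
# spaCy model is installed.
# @require_spacy_model("en_core_web_sm")
# def test_spacy_tokenization():
#     ...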
def UpperCAmelCase__( __UpperCAmelCase : int ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
if not _run_local_tests or _run_local_tests == 0:
__snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : int ):
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( *__UpperCAmelCase : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCAmelCase ) and name.startswith('test' ):
for decorator in decorators:
__snake_case : Optional[Any] = decorator(__UpperCAmelCase )
setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
return cls
return decorate
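# Hypothetical usage of the class decorator above (assuming the binding
# `for_all_test_methods`): each listed decorator is applied to every method
# whose name starts with "test".
# @for_all_test_methods(xfail_if_500_502_http_error)
# class RemoteDatasetTest(unittest.TestCase):
#     def test_download(self):
#         ...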
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
pass
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
__snake_case : Optional[Any] = requests.Session().request
def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case : int = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
__snake_case : str = timeout
try:
return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case : Any = url
__snake_case : Union[str, Any] = e.args[0]
__snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
__snake_case : str = (max_retry_error,)
raise
def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
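# Hypothetical usage of the offline simulation above (`offline` is an assumed
# binding for the obfuscated context manager):
# with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=1e-16):
#     requests.get("https://huggingface.co")  # fails fast instead of hanging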
@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
__snake_case : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
try:
os.chdir(__UpperCAmelCase )
yield
finally:
os.chdir(__UpperCAmelCase )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
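# Hypothetical usage of the two allocation checks above (assumed bindings
# `assert_arrow_memory_increases` / `assert_arrow_memory_doesnt_increase`):
# with assert_arrow_memory_increases():
#     table = pa.table({"col": list(range(100_000))})  # allocates Arrow memory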
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
try:
return func(*__UpperCAmelCase , **__UpperCAmelCase )
except HTTPError as err:
if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
pytest.xfail(str(__UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = returncode
__snake_case : Tuple = stdout
__snake_case : List[Any] = stderr
async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
while True:
__snake_case : Optional[int] = await stream.readline()
if line:
callback(__UpperCAmelCase )
else:
break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
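# Hypothetical usage of the runner above (assuming the binding
# `execute_subprocess_async`): run a child Python process and collect its
# streamed output lines.
# result = execute_subprocess_async([sys.executable, "-c", "print('ok')"])
# assert result.returncode == 0 and "ok" in result.stdout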
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Dict = 2_95_00
__snake_case : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
 | 700 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | 0 |