| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 87 to 55.2k) | int64 (0 to 349) | string (lengths 135 to 49.1k) | int64 (0 to 349) | int64 (0 to 1) |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    """Download configuration (field names follow `datasets.DownloadConfig`, which this dataclass mirrors)."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        # Deep-copy every field so the returned config is fully independent.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
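# Hedged usage sketch: `copy` deep-copies every field, so mutating the clone
# leaves the source config untouched (illustrative only):
if __name__ == "__main__":
    base = DownloadConfig(max_retries=3)
    clone = base.copy()
    clone.max_retries = 5
    assert base.max_retries == 3  # the deep copy is independent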
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forwards to `OwlViTImageProcessor.post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forwards to `OwlViTImageProcessor.post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forwards to `OwlViTImageProcessor.post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forwards to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
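# Hedged usage sketch of the processor above (illustrative; the checkpoint name
# is an assumption and is not taken from the module itself):
if __name__ == "__main__":
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.new("RGB", (768, 768))
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
    print(list(inputs.keys()))  # ['input_ids', 'attention_mask', 'pixel_values']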
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
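# The tests above double as a usage recipe; condensed into a hedged standalone
# sketch ("Salesforce/blip2-opt-2.7b" is an assumed public checkpoint, not taken
# from the tests themselves):
if __name__ == "__main__":
    from PIL import Image

    from transformers import Blip2Processor

    processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
    inputs = processor(text="lower newer", images=Image.new("RGB", (224, 224)), return_tensors="np")
    print(list(inputs.keys()))  # ['pixel_values', 'input_ids', 'attention_mask']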
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for ALBERT."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # the SentencePiece processor itself is not picklable
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with SentencePiece, splitting trailing digit/comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
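# Hedged usage sketch (illustrative; "albert-base-v2" is one of the checkpoints
# listed in PRETRAINED_VOCAB_FILES_MAP above and requires network access):
if __name__ == "__main__":
    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    ids = tokenizer("SentencePiece splits words into sub-word units.")["input_ids"]
    print(tokenizer.convert_ids_to_tokens(ids))
    print(tokenizer.decode(ids))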
import argparse
import hashlib
import io
import os
import urllib
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default download directory is an assumption added here; the call
    # site below passes only the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # `_download` returns the raw checkpoint bytes, so deserialize them here.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
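# Hedged programmatic alternative to the CLI above ("tiny" must be a key of
# `_MODELS`; the output directory is arbitrary and conversion needs network access):
#
#     convert_openai_whisper_to_tfms("tiny", "./whisper-tiny-hf")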
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        # Only build a default scheduler when none was passed in, so that
        # `test_switch` can exercise a caller-supplied scheduler.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
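# Outside the test harness, the scheduler is driven the same way `full_loop`
# drives it. A hedged sketch of a bare denoising loop with a random-weight UNet
# (illustrative only; the UNet config mirrors the small test models used in this dump):
if __name__ == "__main__":
    from diffusers import UNet2DModel

    unet = UNet2DModel(
        block_out_channels=(32, 64),
        layers_per_block=2,
        sample_size=32,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)

    sample = torch.randn(1, 3, 32, 32)
    for t in scheduler.timesteps:
        residual = unet(sample, t).sample  # predicted noise residual
        sample = scheduler.step(residual, t, sample).prev_sample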
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union

import numpy as np
import torch

from .utils import deprecate, is_transformers_available


if is_transformers_available():
    import transformers


def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    """Exponential Moving Average of model parameters."""

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                # Under ZeRO-3 the parameter is partitioned; gather it before updating.
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            else:
                context_manager = contextlib.nullcontext()

            with context_manager:
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the current averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Save the current parameters so they can be restored later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore the parameters saved with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        # deepcopy, to be consistent with the module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
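# Hedged sketch of how EMAModel is typically driven in a training loop (the
# linear model and dummy objective are stand-ins, purely illustrative):
if __name__ == "__main__":
    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    ema = EMAModel(model.parameters(), decay=0.9999)

    for _ in range(100):
        loss = model(torch.randn(8, 4)).pow(2).mean()  # dummy objective
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        ema.step(model.parameters())  # update the shadow weights after each optimizer step

    ema.copy_to(model.parameters())  # swap in the averaged weights for evaluation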
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 201 | from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowercase__ : Dict = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase_ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
lowerCAmelCase = self.get_masked_index(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Dict[str, GenericTensor]:
if return_tensors is None:
lowerCAmelCase = self.framework
lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = self.model(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model_inputs['''input_ids''']
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ) ->str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase = target_ids.shape[0]
lowerCAmelCase = model_outputs['''input_ids'''][0]
lowerCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCAmelCase = outputs.numpy()
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
if target_ids is not None:
lowerCAmelCase = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
lowerCAmelCase = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCAmelCase = probs[..., target_ids]
lowerCAmelCase , lowerCAmelCase = probs.topk(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
lowerCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase = target_ids[p].tolist()
lowerCAmelCase = p
# Filter padding out:
lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(__SCREAMING_SNAKE_CASE )
result.append(__SCREAMING_SNAKE_CASE )
if single_mask:
return result[0]
return result
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [targets]
try:
lowerCAmelCase = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase = {}
lowerCAmelCase = []
for target in targets:
lowerCAmelCase = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if id_ is None:
lowerCAmelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids''']
if len(__SCREAMING_SNAKE_CASE ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
lowerCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
lowerCAmelCase = list(set(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
return target_ids
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Dict:
lowerCAmelCase = {}
if targets is not None:
lowerCAmelCase = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = target_ids
if top_k is not None:
lowerCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
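# A hedged usage sketch of the pipeline above (model name and scores are illustrative,
# not taken from this file; assumes a standard fill-mask checkpoint is available):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", top_k=2, targets=["capital", "heart"])
#   # -> a list of dicts with keys "score", "token", "token_str" and "sequence",
#   #    restricted to the two target tokens as described in the class docstring.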
| 338 | 0 |
lowerCAmelCase : Optional[int] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def A_ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = input('Enter message: ' )
SCREAMING_SNAKE_CASE_ : Any = input('Enter key [alphanumeric]: ' )
SCREAMING_SNAKE_CASE_ : str = input('Encrypt/Decrypt [e/d]: ' )
if mode.lower().startswith('e' ):
SCREAMING_SNAKE_CASE_ : Tuple = 'encrypt'
SCREAMING_SNAKE_CASE_ : Optional[Any] = encrypt_message(snake_case__ , snake_case__ )
elif mode.lower().startswith('d' ):
SCREAMING_SNAKE_CASE_ : Dict = 'decrypt'
SCREAMING_SNAKE_CASE_ : Optional[Any] = decrypt_message(snake_case__ , snake_case__ )
print(f"\n{mode.title()}ed message:" )
print(snake_case__ )
def A_ ( a , a ):
"""simple docstring"""
return translate_message(snake_case__ , snake_case__ , 'encrypt' )
def A_ ( a , a ):
"""simple docstring"""
return translate_message(snake_case__ , snake_case__ , 'decrypt' )
def A_ ( a , a , a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : Tuple = key.upper()
for symbol in message:
SCREAMING_SNAKE_CASE_ : int = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(snake_case__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
else:
translated.append(snake_case__ )
return "".join(snake_case__ )
if __name__ == "__main__":
main()
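# Worked example (the classic Vigenere test vector): encrypting "HELLO" with key "KEY"
# shifts H by K, E by E, L by Y, L by K and O by E, giving "RIJVS"; decrypting "RIJVS"
# with the same key subtracts the shifts and restores "HELLO".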
| 253 | from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase__ : int = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
lowercase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 338 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class _a :
__a : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
__a : Optional[str] = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
__a : Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
__a : Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__a : Optional[int] = field(default=2 , metadata={"""help""": """Batch size for training."""} )
__a : Optional[int] = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
__a : Optional[float] = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
__a : Optional[int] = field(
default=10_000 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
__a : Optional[float] = field(default=2e-4 , metadata={"""help""": """Learning rate for training."""} )
__a : Optional[str] = field(default="""cosine""" , metadata={"""help""": """Learning rate."""} )
__a : Optional[int] = field(
default=750 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
__a : Optional[int] = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""} )
__a : Optional[bool] = field(
default=UpperCamelCase_ , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
__a : Optional[int] = field(default=50_000 , metadata={"""help""": """Maximum number of training steps."""} )
__a : Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__a : Optional[int] = field(default=1_024 , metadata={"""help""": """Sequence lengths used for training."""} )
__a : Optional[int] = field(default=1 , metadata={"""help""": """Training seed."""} )
__a : Optional[int] = field(
default=1_024 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__a : Optional[str] = field(
default=UpperCamelCase_ , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
__a : Optional[bool] = field(default=UpperCamelCase_ , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class _a :
__a : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__a : Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__a : Optional[int] = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
__a : Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__a : Optional[int] = field(default=1_024 , metadata={"""help""": """Length of sequences to be evaluated."""} )
__a : Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class _a :
__a : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__a : Optional[int] = field(default=UpperCamelCase_ , metadata={"""help""": """Number of workers used for code evaluation."""} )
__a : Optional[int] = field(
default=UpperCamelCase_ , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__a : Optional[bool] = field(
default=UpperCamelCase_ , metadata={"""help""": """Sample from the language model's output distribution."""} )
__a : Optional[float] = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
__a : Optional[int] = field(default=256 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
__a : Optional[int] = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
__a : Optional[float] = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
__a : Optional[int] = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""} )
__a : Optional[int] = field(
default=200 , metadata={"""help""": """Number of completions to generate for each sample."""} )
__a : Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
__a : Optional[str] = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""} )
__a : Optional[str] = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
__a : Optional[int] = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class _a :
__a : Optional[int] = field(
default=UpperCamelCase_ , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__a : Optional[str] = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
__a : Optional[str] = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""} )
__a : Optional[int] = field(
default=100_000 , metadata={"""help""": """Number of files to save per JSON output file."""} )
__a : Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__a : Optional[float] = field(
default=1_000 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
__a : Optional[float] = field(
default=100 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
__a : Optional[float] = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
__a : Optional[float] = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
__a : Optional[float] = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
__a : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__a : Optional[bool] = field(
default=UpperCamelCase_ , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
__a : Optional[float] = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class _a :
__a : Optional[str] = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
__a : Optional[str] = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
__a : Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__a : Optional[int] = field(default=200_000 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
__a : Optional[int] = field(
default=32_768 , metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
__a : Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
__a : Optional[bool] = field(default=UpperCamelCase_ , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class _a :
__a : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
__a : Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
__a : Optional[str] = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
__a : Optional[int] = field(default=UpperCamelCase_ , metadata={"""help""": """Number of workers used for code evaluation."""} )
@dataclass
class _a :
__a : Optional[str] = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
__a : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
__a : Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
__a : Optional[bool] = field(default=UpperCamelCase_ , metadata={"""help""": """Push saved tokenizer to the hub."""} )
| 34 | lowercase__ : Optional[int] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def SCREAMING_SNAKE_CASE_ ( ) -> None:
lowerCAmelCase = input('''Enter message: ''' )
lowerCAmelCase = input('''Enter key [alphanumeric]: ''' )
lowerCAmelCase = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
lowerCAmelCase = '''encrypt'''
lowerCAmelCase = encrypt_message(snake_case__ , snake_case__ )
elif mode.lower().startswith('''d''' ):
lowerCAmelCase = '''decrypt'''
lowerCAmelCase = decrypt_message(snake_case__ , snake_case__ )
print(f"\n{mode.title()}ed message:" )
print(snake_case__ )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
return translate_message(snake_case__ , snake_case__ , '''encrypt''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
return translate_message(snake_case__ , snake_case__ , '''decrypt''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> str:
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = key.upper()
for symbol in message:
lowerCAmelCase = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(snake_case__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(snake_case__ ):
lowerCAmelCase = 0
else:
translated.append(snake_case__ )
return "".join(snake_case__ )
if __name__ == "__main__":
main()
| 338 | 0 |
'''simple docstring'''
def a_ ( _lowerCAmelCase ) -> int:
if n == 1 or not isinstance(snake_case__ ,snake_case__ ):
return 0
elif n == 2:
return 1
else:
__lowerCamelCase : int = [0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def a_ ( _lowerCAmelCase ) -> int:
__lowerCamelCase : Any = 0
__lowerCamelCase : List[Any] = 2
while digits < n:
index += 1
__lowerCamelCase : Any = len(str(fibonacci(snake_case__ ) ) )
return index
def a_ ( _lowerCAmelCase = 1000 ) -> int:
return fibonacci_digits_index(snake_case__ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
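# Worked example: the sequence built above starts [0, 1, 1, 2, 3, 5, 8, 13, ...], so the
# 12th Fibonacci number is 144, the first with three digits, and the digit-index search
# returns 12 for n = 3. With n = 1000 this is Project Euler problem 25, whose accepted
# answer is index 4782.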
| 208 | from collections import defaultdict
from math import ceil, sqrt
def SCREAMING_SNAKE_CASE_ ( snake_case__ = 1_0_0_0_0_0_0 , snake_case__ = 1_0 ) -> int:
lowerCAmelCase = defaultdict(snake_case__ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
lowerCAmelCase = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
lowerCAmelCase = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(snake_case__ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 1_0 )
if __name__ == "__main__":
print(f'{solution() = }')
| 338 | 0 |
'''simple docstring'''
def lowercase_ ( _lowercase , _lowercase ) -> int:
'''simple docstring'''
return 1 if input_a == input_a else 0
def lowercase_ ( ) -> None:
'''simple docstring'''
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 318 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Union[str, Any]:
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = features.copy() if features else default_expected_features
lowerCAmelCase = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase = TextDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
if issubclass(snake_case__ , snake_case__ ):
lowerCAmelCase = text_path
elif issubclass(snake_case__ , snake_case__ ):
lowerCAmelCase = [text_path]
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Optional[Any]:
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase = TextDatasetReader({'''train''': text_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
lowerCAmelCase = tmp_path / '''cache'''
# note: the text builder exposes a single "text" column whose default dtype is "string"
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = features.copy() if features else default_expected_features
lowerCAmelCase = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase = TextDatasetReader({'''train''': text_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any:
if split:
lowerCAmelCase = {split: text_path}
else:
lowerCAmelCase = '''train'''
lowerCAmelCase = {'''train''': text_path, '''test''': text_path}
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
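# A minimal usage sketch of the text reader exercised above (assumes a local file
# "sample.txt" exists; the path is illustrative):
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "sample.txt"})
#   ds["train"].features  # -> {"text": Value("string")} by default, as the checks above assert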
| 338 | 0 |
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] ):
if len(snake_case__ ) == 0:
return False
__lowercase : str = len(snake_case__ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , snake_case__ )
else:
return binary_search(a_list[midpoint + 1 :] , snake_case__ )
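# Worked trace of the recursion above: searching for 5 in [1, 3, 5, 7] checks the midpoint
# at index 2 (value 5) and returns True; searching for 4 recurses on [1, 3], then on the
# empty slice after the midpoint, and returns False.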
if __name__ == "__main__":
lowerCamelCase : str = input('''Enter numbers separated by comma:\n''').strip()
lowerCamelCase : Union[str, Any] = [int(item.strip()) for item in user_input.split(''',''')]
lowerCamelCase : Tuple = int(input('''Enter the number to be found in the list:\n''').strip())
lowerCamelCase : List[Any] = '''''' if binary_search(sequence, target) else '''not '''
print(f'''{target} was {not_str}found in {sequence}''') | 233 | def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
if isinstance(snake_case__ , snake_case__ ):
raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
if num == 0:
return "0b0"
lowerCAmelCase = False
if num < 0:
lowerCAmelCase = True
lowerCAmelCase = -num
lowerCAmelCase = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(snake_case__ ) for e in binary )
return "0b" + "".join(str(snake_case__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
'''simple docstring'''
from math import loga
def lowerCamelCase__ ( _A ):
if a < 0:
raise ValueError('Input value must be a positive integer' )
elif isinstance(snake_case__ , snake_case__ ):
raise TypeError('Input value must be a \'int\' type' )
return 0 if (a == 0) else int(loga(a & -a ) )
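# Why the formula works: in two's complement, a & -a isolates the lowest set bit, and
# log2 of that power of two is its index. Worked example: a = 12 = 0b1100 gives
# a & -a = 4 = 0b100 and log2(4) = 2, the index of the rightmost set bit.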
if __name__ == "__main__":
import doctest
doctest.testmod() | 297 | class lowercase_ :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = name
lowerCAmelCase = value
lowerCAmelCase = weight
def __repr__( self ) ->str:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
return self.value
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
return self.name
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
return self.weight
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
return self.value / self.weight
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int:
lowerCAmelCase = []
for i in range(len(snake_case__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
lowerCAmelCase = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ )
lowerCAmelCase = []
lowerCAmelCase , lowerCAmelCase = 0.0, 0.0
for i in range(len(snake_case__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
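# Worked sketch of the greedy pass above (assuming the sort key is the value-per-weight
# ratio, as the per-item ratio method suggests): values [60, 100, 120], weights
# [10, 20, 30], max_cost 50 -> ratios 6, 5, 4; the loop takes the first two items
# (total weight 30, value 160) and skips the last, since 30 + 30 exceeds 50.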
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : str ):
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : str=0 ):
return sorted(snake_case__ , key=lambda __lowerCAmelCase : x[column] )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict=float('inf' ) ):
for i in range(points_counts - 1 ):
for j in range(i + 1 , snake_case__ ):
a__ = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
a__ = current_dis
return min_dis
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=float('inf' ) ):
for i in range(min(6 , points_counts - 1 ) , snake_case__ ):
for j in range(max(0 , i - 6 ) , snake_case__ ):
a__ = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
a__ = current_dis
return min_dis
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ):
# base case
if points_counts <= 3:
return dis_between_closest_pair(snake_case__ , snake_case__ )
# recursion
a__ = points_counts // 2
a__ = closest_pair_of_points_sqr(
snake_case__ , points_sorted_on_y[:mid] , snake_case__ )
a__ = closest_pair_of_points_sqr(
snake_case__ , points_sorted_on_y[mid:] , points_counts - mid )
a__ = min(snake_case__ , snake_case__ )
a__ = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(snake_case__ )
a__ = dis_between_closest_in_strip(
snake_case__ , len(snake_case__ ) , snake_case__ )
return min(snake_case__ , snake_case__ )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
a__ = column_based_sort(snake_case__ , column=0 )
a__ = column_based_sort(snake_case__ , column=1 )
return (
closest_pair_of_points_sqr(
snake_case__ , snake_case__ , snake_case__ )
) ** 0.5
if __name__ == "__main__":
snake_case : str = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
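# For the sample points above, the closest pair is (2, 3) and (3, 4), so the printed
# distance is sqrt(2) ~ 1.4142. The divide-and-conquer recursion with the strip check
# runs in roughly O(n log n), versus O(n^2) for the brute-force pairwise helper.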
| 240 | import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowercase__ : Dict = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowercase__ : Optional[int] = [0, 2_5, 5_0]
lowercase__ : Union[str, Any] = [2_5, 5_0, 7_5]
lowercase__ : int = fuzz.membership.trimf(X, abca)
lowercase__ : Tuple = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowercase__ : List[str] = np.ones(7_5)
lowercase__ : Any = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
lowercase__ : Union[str, Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowercase__ : int = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
lowercase__ : Union[str, Any] = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowercase__ : Optional[int] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
lowercase__ : Any = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowercase__ : str = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
lowercase__ : Tuple = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
lowercase__ : Tuple = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
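# Worked check of the operations above with scalar memberships uA = 0.3, uB = 0.7:
# union max(0.3, 0.7) = 0.7; intersection min(0.3, 0.7) = 0.3; complement of A = 0.7;
# algebraic sum 0.3 + 0.7 - 0.21 = 0.79; algebraic product 0.21;
# bounded sum min(1, 1.0) = 1.0; bounded difference max(0, 0.3 - 0.7) = 0.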
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 338 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self : List[str] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[str]=13 ,lowerCamelCase__ : Tuple=7 ,lowerCamelCase__ : int=False ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : int=False ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Any=19 ,lowerCamelCase__ : int=32 ,lowerCamelCase__ : Dict=5 ,lowerCamelCase__ : Any=4 ,lowerCamelCase__ : Optional[int]=37 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Optional[int]=512 ,lowerCamelCase__ : int=16 ,lowerCamelCase__ : List[Any]=2 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : Optional[Any]=3 ,lowerCamelCase__ : Optional[int]=4 ,lowerCamelCase__ : List[str]=None ,) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = EsmConfig(
vocab_size=33 ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,is_folding_model=__SCREAMING_SNAKE_CASE ,esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} ,)
return config
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = EsmForProteinFolding(config=__SCREAMING_SNAKE_CASE ).float()
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.positions.shape ,(8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape ,(8, self.batch_size, self.seq_length, 7, 2) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
),
) = config_and_inputs
SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : Optional[Any] = False
__snake_case : Dict = (EsmForProteinFolding,) if is_torch_available() else ()
__snake_case : List[Any] = ()
__snake_case : Tuple = {} if is_torch_available() else {}
__snake_case : List[str] = False
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = EsmFoldModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@unittest.skip("""Does not support attention outputs""" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""ESMFold only has one output format.""" )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""This test doesn\'t work for ESMFold and doesn\'t test core functionality""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn\'t support torchscript compilation.""" )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn\'t support torchscript compilation.""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn\'t support torchscript compilation.""" )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn\'t support data parallel.""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch
class UpperCamelCase__ ( UpperCamelCase_ ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float()
model.eval()
SCREAMING_SNAKE_CASE = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )["""positions"""]
SCREAMING_SNAKE_CASE = torch.tensor([2.5828, 0.7993, -10.9334] ,dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] ,__SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
| 296 | import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Optional[Any]:
lowerCAmelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
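# Note on the config above: for the "linear" schedule, DDPMScheduler builds its betas
# essentially as torch.linspace(beta_start, beta_end, num_train_timesteps) - here 1000
# values from 1e-4 to 0.02 - which is what the variance assertions further down rely on.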
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
if i == len(__SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase = -1
else:
lowerCAmelCase = timesteps[i + 1]
lowerCAmelCase = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 1, 0]
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__SCREAMING_SNAKE_CASE , msg=F"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
| 338 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : str=13 , lowerCAmelCase__ : Union[str, Any]=7 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : str=99 , lowerCAmelCase__ : List[str]=32 , lowerCAmelCase__ : Any=5 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : int=37 , lowerCAmelCase__ : Union[str, Any]="gelu" , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : List[Any]=512 , lowerCAmelCase__ : Any=16 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : Tuple=4 , ):
SCREAMING_SNAKE_CASE_: Optional[Any] = parent
SCREAMING_SNAKE_CASE_: Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE_: Dict = seq_length
SCREAMING_SNAKE_CASE_: int = is_training
SCREAMING_SNAKE_CASE_: str = use_attention_mask
SCREAMING_SNAKE_CASE_: List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE_: Any = use_labels
SCREAMING_SNAKE_CASE_: Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE_: Tuple = hidden_size
SCREAMING_SNAKE_CASE_: str = num_hidden_layers
SCREAMING_SNAKE_CASE_: Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE_: Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE_: Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: Any = max_position_embeddings
SCREAMING_SNAKE_CASE_: Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE_: Any = type_sequence_label_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_: str = num_choices
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE_: Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE_: Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE_: str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_: Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
SCREAMING_SNAKE_CASE_: List[Any] = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_: Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class __lowercase ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : str = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: List[str] = FlaxAlbertModelTester(self)
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class_name.from_pretrained("albert-base-v2")
SCREAMING_SNAKE_CASE_: Dict = model(np.ones((1, 1)))
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
@require_flax
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Dict = FlaxAlbertModel.from_pretrained("albert-base-v2")
SCREAMING_SNAKE_CASE_: Tuple = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
SCREAMING_SNAKE_CASE_: str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
SCREAMING_SNAKE_CASE_: str = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)[0]
SCREAMING_SNAKE_CASE_: Dict = (1, 11, 768)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE)
SCREAMING_SNAKE_CASE_: int = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4))
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase__ : str = logging.get_logger(__name__)
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = """AutoTokenizer"""
UpperCAmelCase_ : Optional[int] = ["""tokenizer"""]
UpperCAmelCase_ : str = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = speaker_embeddings
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ) ->Tuple:
if speaker_embeddings_dict_path is not None:
lowerCAmelCase = get_file_from_repo(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , )
if speaker_embeddings_path is None:
logger.warning(
F"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
lowerCAmelCase = None
else:
with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = None
lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
lowerCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" )
lowerCAmelCase = tmp_dict
with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) ->List[str]:
lowerCAmelCase = self.speaker_embeddings[voice_preset]
lowerCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
lowerCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , )
if path is None:
raise ValueError(
F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE )
return voice_preset_dict
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None ) ->Tuple:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->int:
if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
else:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('''.npz''' ):
lowerCAmelCase = voice_preset + '''.npz'''
lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE )
if voice_preset is not None:
self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCAmelCase = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
if voice_preset is not None:
lowerCAmelCase = voice_preset
return encoded_text
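# Minimal usage sketch for the processor above (which appears to be the obfuscated Bark
# processor). The checkpoint and preset names below are illustrative assumptions, not
# values verified against this code:
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")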
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase=1_3 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=9_9 , _UpperCAmelCase=3_2 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=3_7 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_1_2 , _UpperCAmelCase=1_6 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=4 , ) -> Any:
__UpperCamelCase : Tuple = parent
__UpperCamelCase : Dict = batch_size
__UpperCamelCase : Union[str, Any] = seq_length
__UpperCamelCase : List[str] = is_training
__UpperCamelCase : Tuple = use_attention_mask
__UpperCamelCase : Union[str, Any] = use_token_type_ids
__UpperCamelCase : Union[str, Any] = use_labels
__UpperCamelCase : Optional[int] = vocab_size
__UpperCamelCase : str = hidden_size
__UpperCamelCase : Any = num_hidden_layers
__UpperCamelCase : List[str] = num_attention_heads
__UpperCamelCase : Any = intermediate_size
__UpperCamelCase : Any = hidden_act
__UpperCamelCase : Optional[Any] = hidden_dropout_prob
__UpperCamelCase : List[Any] = attention_probs_dropout_prob
__UpperCamelCase : int = max_position_embeddings
__UpperCamelCase : Optional[int] = type_vocab_size
__UpperCamelCase : str = type_sequence_label_size
__UpperCamelCase : Tuple = initializer_range
__UpperCamelCase : Optional[int] = num_choices
def a_ (self ) -> Tuple:
__UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : Optional[Any] = None
if self.use_attention_mask:
__UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : int = None
if self.use_token_type_ids:
__UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : str = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a_ (self ) -> Optional[int]:
__UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[int] = config_and_inputs
__UpperCamelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def a_ (self ) -> Optional[int]:
__UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = config_and_inputs
__UpperCamelCase : Optional[int] = True
__UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
A = True
A = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a_ (self ) -> int:
__UpperCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def a_ (self ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__UpperCamelCase : Any = model_class_name.from_pretrained("roberta-base" , from_pt=__SCREAMING_SNAKE_CASE )
__UpperCamelCase : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
def lowerCAmelCase_ ( __UpperCAmelCase: List[Any] , __UpperCAmelCase: Union[str, Any] ) -> list[int]:
UpperCamelCase__ : Optional[int] = int(snake_case__ )
# Initialize Result
UpperCamelCase__ : Optional[Any] = []
# Traverse through all denomination
for denomination in reversed(snake_case__ ):
# Find denominations
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
            answer.append(snake_case__ )  # append this denomination to the answer list
return answer
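# A quick illustration of the greedy strategy above (correct for canonical coin systems
# such as the Indian denominations used below, but not optimal for arbitrary systems):
#
#   find_minimum_change([1, 2, 5, 10], 27)  ->  [10, 10, 5, 2]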
# Driver Code
if __name__ == "__main__":
UpperCAmelCase_ = []
UpperCAmelCase_ = '''0'''
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
UpperCAmelCase_ = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
UpperCAmelCase_ = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase_ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
UpperCAmelCase_ = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(F'''Following is minimal change for {value}: ''')
UpperCAmelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase__ : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowercase__ : Dict = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
lowerCAmelCase = self.transformer_dir
shutil.copy(
os.path.join(__SCREAMING_SNAKE_CASE , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Union[str, Any]:
lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''\n''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
# Base copy consistency
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , __SCREAMING_SNAKE_CASE , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with a really long name
lowerCAmelCase = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , F"{long_class_name}LMPredictionHead" , re.sub('''Bert''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , __SCREAMING_SNAKE_CASE , overwrite_result=re.sub('''Bert''' , '''TestModel''' , __SCREAMING_SNAKE_CASE ) , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
lowerCAmelCase = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] )
self.assertFalse(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
from __future__ import annotations
lowerCAmelCase : Optional[int] = tuple[int, int, int]
lowerCAmelCase : str = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
lowerCAmelCase : Tuple = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
# -------------------------- default selection --------------------------
# rotors --------------------------
lowerCAmelCase : Tuple = '''EGZWVONAHDCLFQMSIPJBYUKXTR'''
lowerCAmelCase : Optional[Any] = '''FOBHMDKEXQNRAULPGSJVTYICZW'''
lowerCAmelCase : Tuple = '''ZJXESIUQLHAVRMDOYGTNFWPBKC'''
# reflector --------------------------
lowerCAmelCase : List[str] = {
'''A''': '''N''',
'''N''': '''A''',
'''B''': '''O''',
'''O''': '''B''',
'''C''': '''P''',
'''P''': '''C''',
'''D''': '''Q''',
'''Q''': '''D''',
'''E''': '''R''',
'''R''': '''E''',
'''F''': '''S''',
'''S''': '''F''',
'''G''': '''T''',
'''T''': '''G''',
'''H''': '''U''',
'''U''': '''H''',
'''I''': '''V''',
'''V''': '''I''',
'''J''': '''W''',
'''W''': '''J''',
'''K''': '''X''',
'''X''': '''K''',
'''L''': '''Y''',
'''Y''': '''L''',
'''M''': '''Z''',
'''Z''': '''M''',
}
# -------------------------- extra rotors --------------------------
lowerCAmelCase : Dict = '''RMDJXFUWGISLHVTCQNKYPBEZOA'''
lowerCAmelCase : Optional[Any] = '''SGLCPQWZHKXAREONTFBVIYJUDM'''
lowerCAmelCase : Any = '''HVSICLTYKQUBXDWAJZOMFGPREN'''
lowerCAmelCase : Union[str, Any] = '''RZWQHFMVDBKICJLNTUXAGYPSOE'''
lowerCAmelCase : Dict = '''LFKIJODBEGAMQPXVUHYSTCZRWN'''
lowerCAmelCase : Optional[int] = '''KOAEGVDHXPQZMLFTYWJNBRCIUS'''
def A_ ( a , a , a ):
"""simple docstring"""
if (unique_rotsel := len(set(snake_case__ ) )) < 3:
SCREAMING_SNAKE_CASE_ : Tuple = f"Please use 3 unique rotors (not {unique_rotsel})"
raise Exception(snake_case__ )
# Checks if rotor positions are valid
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = rotpos
if not 0 < rotorposa <= len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = f"First rotor position is not within range of 1..26 ({rotorposa}"
raise ValueError(snake_case__ )
if not 0 < rotorposa <= len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = f"Second rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(snake_case__ )
if not 0 < rotorposa <= len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = f"Third rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(snake_case__ )
# Validates string and returns dict
SCREAMING_SNAKE_CASE_ : List[str] = _plugboard(snake_case__ )
return rotpos, rotsel, pbdict
def A_ ( a ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = f"Plugboard setting isn't type string ({type(snake_case__ )})"
raise TypeError(snake_case__ )
elif len(snake_case__ ) % 2 != 0:
SCREAMING_SNAKE_CASE_ : List[Any] = f"Odd number of symbols ({len(snake_case__ )})"
raise Exception(snake_case__ )
elif pbstring == "":
return {}
    pbstring = pbstring.replace(' ' , '' )
# Checks if all characters are unique
SCREAMING_SNAKE_CASE_ : Union[str, Any] = set()
for i in pbstring:
if i not in abc:
SCREAMING_SNAKE_CASE_ : Optional[int] = f"'{i}' not in list of symbols"
raise Exception(snake_case__ )
elif i in tmppbl:
SCREAMING_SNAKE_CASE_ : Optional[int] = f"Duplicate symbol ({i})"
raise Exception(snake_case__ )
else:
tmppbl.add(snake_case__ )
del tmppbl
# Created the dictionary
SCREAMING_SNAKE_CASE_ : str = {}
for j in range(0 , len(snake_case__ ) - 1 , 2 ):
SCREAMING_SNAKE_CASE_ : str = pbstring[j + 1]
SCREAMING_SNAKE_CASE_ : Optional[int] = pbstring[j]
return pb
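# Example of the mapping built above: _plugboard("ABCD") -> {"A": "B", "B": "A", "C": "D", "D": "C"};
# each pair of symbols is stored in both directions so the substitution is its own inverse.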
def A_ ( a , a , a = (rotora, rotora, rotora) , a = "" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = text.upper()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = _validator(
snake_case__ , snake_case__ , plugb.upper() )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = rotor_position
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
SCREAMING_SNAKE_CASE_ : Any = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = plugboard[symbol]
# rotor ra --------------------------
SCREAMING_SNAKE_CASE_ : Dict = abc.index(snake_case__ ) + rotorposa
SCREAMING_SNAKE_CASE_ : Optional[int] = rotora[index % len(snake_case__ )]
# rotor rb --------------------------
SCREAMING_SNAKE_CASE_ : List[str] = abc.index(snake_case__ ) + rotorposa
SCREAMING_SNAKE_CASE_ : List[Any] = rotora[index % len(snake_case__ )]
# rotor rc --------------------------
SCREAMING_SNAKE_CASE_ : List[str] = abc.index(snake_case__ ) + rotorposa
SCREAMING_SNAKE_CASE_ : List[str] = rotora[index % len(snake_case__ )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
SCREAMING_SNAKE_CASE_ : Optional[Any] = reflector[symbol]
# 2nd rotors
SCREAMING_SNAKE_CASE_ : List[Any] = abc[rotora.index(snake_case__ ) - rotorposa]
SCREAMING_SNAKE_CASE_ : Any = abc[rotora.index(snake_case__ ) - rotorposa]
SCREAMING_SNAKE_CASE_ : Optional[int] = abc[rotora.index(snake_case__ ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
SCREAMING_SNAKE_CASE_ : Tuple = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Tuple = 0
rotorposa += 1
if rotorposa >= len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
rotorposa += 1
if rotorposa >= len(snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(snake_case__ )
return "".join(snake_case__ )
if __name__ == "__main__":
lowerCAmelCase : Any = '''This is my Python script that emulates the Enigma machine from WWII.'''
lowerCAmelCase : str = (1, 1, 1)
lowerCAmelCase : Any = '''pictures'''
lowerCAmelCase : Tuple = (rotora, rotora, rotora)
lowerCAmelCase : List[Any] = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = split_dict._to_yaml_list()
assert len(snake_case__ ) == len(snake_case__ )
lowerCAmelCase = SplitDict._from_yaml_list(snake_case__ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=snake_case__ ), SplitInfo(dataset_name='''my_dataset''' )] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]:
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
lowerCAmelCase = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
'''simple docstring'''
from math import sqrt
def snake_case_ (_a : Optional[Any] = 1_0_0_0_0_0_0 ):
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 4_2
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(snake_case__ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
import unittest
import numpy as np
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ) -> np.ndarray:
lowerCAmelCase = np.shape(snake_case__ )
lowerCAmelCase = np.shape(snake_case__ )
lowerCAmelCase = np.shape(snake_case__ )
if shape_a[0] != shape_b[0]:
lowerCAmelCase = (
'''Expected the same number of rows for A and B. '''
f"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(snake_case__ )
if shape_b[1] != shape_c[1]:
lowerCAmelCase = (
'''Expected the same number of columns for B and C. '''
f"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(snake_case__ )
lowerCAmelCase = pseudo_inv
if a_inv is None:
try:
lowerCAmelCase = np.linalg.inv(snake_case__ )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
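# The value returned above is the Schur complement S = C - B^T A^{-1} B of the block
# matrix M = [[A, B], [B^T, C]]; it satisfies det(M) = det(A) * det(S), which is the
# identity the first unit test below checks numerically.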
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->None:
lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCAmelCase = np.array([[2, 1], [6, 3]] )
lowerCAmelCase = schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.block([[a, b], [b.T, c]] )
lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.linalg.det(__SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , det_a * det_s )
def SCREAMING_SNAKE_CASE_ ( self ) ->None:
lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCAmelCase = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->None:
lowerCAmelCase = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCAmelCase = np.array([[0, 3], [3, 0], [2, 3]] )
lowerCAmelCase = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
schur_complement(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
'''simple docstring'''
import sys
import turtle
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> tuple[float, float]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,) -> None:
my_pen.up()
my_pen.goto(vertexa[0] ,vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] ,vertexa[1] )
my_pen.goto(vertexa[0] ,vertexa[1] )
my_pen.goto(vertexa[0] ,vertexa[1] )
if depth == 0:
return
triangle(snake_case__ ,get_mid(snake_case__ ,snake_case__ ) ,get_mid(snake_case__ ,snake_case__ ) ,depth - 1 )
triangle(snake_case__ ,get_mid(snake_case__ ,snake_case__ ) ,get_mid(snake_case__ ,snake_case__ ) ,depth - 1 )
triangle(snake_case__ ,get_mid(snake_case__ ,snake_case__ ) ,get_mid(snake_case__ ,snake_case__ ) ,depth - 1 )
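# Each call draws one triangle, then recurses on the three corner sub-triangles with
# depth - 1; the recursion bottoms out at depth 0, producing the Sierpinski pattern.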
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
_UpperCamelCase = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
_UpperCamelCase = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowercase__ : Any = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
lowerCAmelCase = ['''layers''', '''blocks''']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
lowercase__ : List[Any] = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = list(s_dict.keys() )
for key in keys:
lowerCAmelCase = key
for k, v in WHISPER_MAPPING.items():
if k in key:
lowerCAmelCase = new_key.replace(snake_case__ , snake_case__ )
print(f"{key} -> {new_key}" )
lowerCAmelCase = s_dict.pop(snake_case__ )
return s_dict
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase , lowerCAmelCase = emb.weight.shape
lowerCAmelCase = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
lowerCAmelCase = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> bytes:
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCAmelCase = os.path.basename(snake_case__ )
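    # OpenAI embeds the expected SHA-256 digest as the second-to-last path component
    # of each checkpoint URL; it is used below to validate the downloaded file.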
lowerCAmelCase = url.split('''/''' )[-2]
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
if os.path.exists(snake_case__ ) and not os.path.isfile(snake_case__ ):
raise RuntimeError(f"{download_target} exists and is not a regular file" )
if os.path.isfile(snake_case__ ):
lowerCAmelCase = open(snake_case__ , '''rb''' ).read()
if hashlib.shaaaa(snake_case__ ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
with urllib.request.urlopen(snake_case__ ) as source, open(snake_case__ , '''wb''' ) as output:
with tqdm(
total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=snake_case__ , unit_divisor=1_0_2_4 ) as loop:
while True:
lowerCAmelCase = source.read(8_1_9_2 )
if not buffer:
break
output.write(snake_case__ )
loop.update(len(snake_case__ ) )
lowerCAmelCase = open(snake_case__ , '''rb''' ).read()
if hashlib.shaaaa(snake_case__ ).hexdigest() != expected_shaaaa:
raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )
return model_bytes
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
if ".pt" not in checkpoint_path:
lowerCAmelCase = _download(_MODELS[checkpoint_path] )
else:
lowerCAmelCase = torch.load(snake_case__ , map_location='''cpu''' )
lowerCAmelCase = original_checkpoint['''dims''']
lowerCAmelCase = original_checkpoint['''model_state_dict''']
lowerCAmelCase = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(snake_case__ )
rename_keys(snake_case__ )
lowerCAmelCase = True
lowerCAmelCase = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
lowerCAmelCase = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=snake_case__ , decoder_ffn_dim=snake_case__ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
lowerCAmelCase = WhisperForConditionalGeneration(snake_case__ )
lowerCAmelCase , lowerCAmelCase = model.model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0 and not set(snake_case__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f" but all the following weights are missing {missing}" )
if tie_embeds:
lowerCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowerCAmelCase = proj_out_weights
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase__ : int = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__lowercase : List[Any] = 4
__lowercase : Dict = 3
class __lowercase ( UpperCamelCase_ ):
pass
def lowercase_ ( _lowercase ) -> Optional[Any]:
'''simple docstring'''
for shard in shards:
for i in range(snake_case__ ):
yield {"i": i, "shard": shard}
def lowercase_ ( ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = int(os.environ['''RANK'''] )
lowerCamelCase_ : List[Any] = int(os.environ['''WORLD_SIZE'''] )
lowerCamelCase_ : Optional[int] = ArgumentParser()
parser.add_argument('''--streaming''' , type=snake_case__ )
parser.add_argument('''--local_rank''' , type=snake_case__ )
parser.add_argument('''--num_workers''' , type=snake_case__ , default=0 )
lowerCamelCase_ : Tuple = parser.parse_args()
lowerCamelCase_ : Union[str, Any] = args.streaming
lowerCamelCase_ : int = args.num_workers
lowerCamelCase_ : Optional[int] = {'''shards''': [F"""shard_{shard_idx}""" for shard_idx in range(snake_case__ )]}
lowerCamelCase_ : Any = IterableDataset.from_generator(snake_case__ , gen_kwargs=snake_case__ )
if not streaming:
lowerCamelCase_ : Any = Dataset.from_list(list(snake_case__ ) )
lowerCamelCase_ : List[str] = split_dataset_by_node(snake_case__ , rank=snake_case__ , world_size=snake_case__ )
lowerCamelCase_ : int = torch.utils.data.DataLoader(snake_case__ , num_workers=snake_case__ )
lowerCamelCase_ : str = NUM_SHARDS * NUM_ITEMS_PER_SHARD
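    # Each rank should see floor(full_size / world_size) items, plus one extra item
    # for the first (full_size % world_size) ranks when the split is uneven.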
lowerCamelCase_ : str = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
lowerCamelCase_ : Optional[int] = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
from ...processing_utils import ProcessorMixin
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = ["""image_processor""", """feature_extractor"""]
UpperCAmelCase_ : Optional[int] = """TvltImageProcessor"""
UpperCAmelCase_ : Optional[int] = """TvltFeatureExtractor"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
super().__init__(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = image_processor
lowerCAmelCase = feature_extractor
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) ->List[Any]:
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )
lowerCAmelCase = None
if images is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , mask_pixel=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if images_mixed is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , is_mixed=__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if audio is not None:
lowerCAmelCase = self.feature_extractor(
__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , mask_audio=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
if audio is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
if images is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
if images_mixed_dict is not None:
output_dict.update(__SCREAMING_SNAKE_CASE )
return output_dict
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.image_processor.model_input_names
lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
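# Minimal usage sketch for the processor above (the obfuscated TvltProcessor); the
# checkpoint name and input variables are illustrative assumptions:
#
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44100)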
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCamelCase : Dict = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)


def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[str]:
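    # In-place selection sort: on each pass, find the minimum of the unsorted
    # suffix and swap it to the front; O(n^2) comparisons, O(1) extra space.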
lowerCAmelCase = len(snake_case__ )
for i in range(length - 1 ):
lowerCAmelCase = i
for k in range(i + 1 , snake_case__ ):
if collection[k] < collection[least]:
lowerCAmelCase = k
if least != i:
lowerCAmelCase , lowerCAmelCase = (collection[i], collection[least])
return collection
if __name__ == "__main__":
lowercase__ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
lowercase__ : str = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowerCAmelCase: int = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
lowerCAmelCase: Tuple = 1_0
lowerCAmelCase: Tuple = 2_5_6
def lowerCamelCase__ ( _A ):
if len(snake_case__ ) < MIN_NUM_TOKENS:
return None
a : Any = MinHash(num_perm=snake_case__ )
for token in set(snake_case__ ):
min_hash.update(token.encode() )
return min_hash
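# With 256 permutations (NUM_PERM) the MinHash signature above gives an unbiased
# estimate of the Jaccard similarity between token sets; inputs shorter than
# MIN_NUM_TOKENS (10 tokens) are skipped entirely.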
def lowerCamelCase__ ( _A ):
return {t for t in NON_ALPHA.split(snake_case__ ) if len(t.strip() ) > 0}
class a__:
def __init__( self : str , *,
__snake_case : List[Any] = 0.85 , ):
a : Any = duplication_jaccard_threshold
a : Optional[Any] = NUM_PERM
a : Union[str, Any] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
a : Optional[int] = defaultdict(__SCREAMING_SNAKE_CASE )
def lowercase_ ( self : Tuple , __snake_case : Union[str, Any] , __snake_case : Tuple ):
a : Tuple = self._index.query(__SCREAMING_SNAKE_CASE )
if code_key in self._index.keys:
print(F"""Duplicate key {code_key}""" )
return
self._index.insert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(__SCREAMING_SNAKE_CASE )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(__SCREAMING_SNAKE_CASE )
def lowercase_ ( self : int ):
a : Dict = []
for base, duplicates in self._duplicate_clusters.items():
a : str = [base] + list(__SCREAMING_SNAKE_CASE )
# reformat the cluster to be a list of dict
a : Tuple = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(__SCREAMING_SNAKE_CASE )
return duplicate_clusters
def lowercase_ ( self : Optional[Any] , __snake_case : Dict ):
a : Union[str, Any] = self.get_duplicate_clusters()
with open(__SCREAMING_SNAKE_CASE , 'w' ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( _A ):
a , a : List[Any] = element
a : str = get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCamelCase__ ( _A ):
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(snake_case__ , max_queue_size=1_0000 ) , chunksize=100 , ):
if data is not None:
yield data
def lowerCamelCase__ ( _A , _A ):
a : List[str] = DuplicationIndex(duplication_jaccard_threshold=snake_case__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(snake_case__ ) ) , max_queue_size=100 ) ):
di.add(snake_case__ , snake_case__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCamelCase__ ( _A , _A ):
a : Tuple = get_tokens(snake_case__ )
a : str = get_tokens(snake_case__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
lowerCAmelCase: int = None
def lowerCamelCase__ ( _A , _A ):
a : List[str] = []
for elementa in cluster:
a : Optional[int] = _shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
a : List[str] = _shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(snake_case__ , snake_case__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
a : str = 1
extremes.append(snake_case__ )
return extremes
def lowerCamelCase__ ( _A , _A , _A ):
global _shared_dataset
a : Optional[Any] = dataset
a : List[Any] = []
a : Tuple = partial(_find_cluster_extremes_shared , jaccard_threshold=snake_case__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
snake_case__ , snake_case__ , ) , total=len(snake_case__ ) , ):
extremes_list.append(snake_case__ )
return extremes_list
def lowerCamelCase__ ( _A , _A = 0.85 ):
a : Union[str, Any] = make_duplicate_clusters(snake_case__ , snake_case__ )
a : Tuple = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
a : Any = {}
a : Dict = find_extremes(snake_case__ , snake_case__ , snake_case__ )
for extremes in extremes_clusters:
for element in extremes:
a : Optional[int] = element
a : Optional[int] = duplicate_indices - set(extreme_dict.keys() )
a : Dict = dataset.filter(lambda _A , _A : idx not in remove_indices , with_indices=snake_case__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
a : Union[str, Any] = element['base_index'] in extreme_dict
if element["is_extreme"]:
a : Union[str, Any] = extreme_dict[element['base_index']]['copies']
print(f"""Original dataset size: {len(snake_case__ )}""" )
print(f"""Number of duplicate clusters: {len(snake_case__ )}""" )
print(f"""Files in duplicate cluster: {len(snake_case__ )}""" )
print(f"""Unique files in duplicate cluster: {len(snake_case__ )}""" )
print(f"""Filtered dataset size: {len(snake_case__ )}""" )
    return ds_filter, duplicate_clusters


import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowercase_ :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=19 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Union[str, Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__SCREAMING_SNAKE_CASE , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) ->Tuple:
        model = EsmForProteinFolding(config=config ).float()
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
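    # Shape sketch (my reading of the asserts above): `positions` stacks the
    # structure-module trajectory (leading dim 8) of atom14 coordinates
    # (14 heavy atoms x 3D), while `angles` holds seven torsion angles per
    # residue encoded as (sin, cos) pairs.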
    def prepare_config_and_inputs_for_common( self ) ->int:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Dict = (EsmForProteinFolding,) if is_torch_available() else ()
UpperCAmelCase_ : List[Any] = ()
UpperCAmelCase_ : Tuple = {} if is_torch_available() else {}
UpperCAmelCase_ : List[str] = False
    def setUp( self ) ->Dict:
        self.model_tester = EsmFoldModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ) ->Any:
self.config_tester.run_common_tests()
    def test_model( self ) ->Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip('''Does not support attention outputs''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
pass
@unittest.skip
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold only has one output format.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
@require_torch
class EsmModelIntegrationTest ( TestCasePlus ):
"""simple docstring"""
@slow
    def test_inference_protein_folding( self ) ->str:
        model = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        position_outputs = model(input_ids )['''positions''']
        expected_slice = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.float32 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1e-4 ) )
| 338 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    '''debug''': logging.DEBUG,
    '''info''': logging.INFO,
    '''warning''': logging.WARNING,
    '''error''': logging.ERROR,
    '''critical''': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level ( ):
    env_level_str = os.getenv('TRANSFORMERS_VERBOSITY' , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
F'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def _get_library_name ( ):
    return __name__.split('.' )[0]
def _get_library_root_logger ( ):
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger ( ):
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler() # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def _reset_library_root_logger ( ):
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict ( ):
    return log_levels
def get_logger ( name: Optional[str] = None ):
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def get_verbosity ( ):
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity ( verbosity: int ):
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info ( ):
    return set_verbosity(INFO )
def set_verbosity_warning ( ):
    return set_verbosity(WARNING )
def set_verbosity_debug ( ):
    return set_verbosity(DEBUG )
def set_verbosity_error ( ):
    return set_verbosity(ERROR )
def disable_default_handler ( ):
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler ( ):
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def add_handler ( handler: logging.Handler ):
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )
def remove_handler ( handler: logging.Handler ):
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )
def disable_propagation ( ):
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation ( ):
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format ( ):
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
        handler.setFormatter(formatter )
def reset_format ( ):
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice ( self , *args , **kwargs ):
    no_advisory_warnings = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once ( self , *args , **kwargs ):
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm :
    def __init__( self , *args , **kwargs ) -> None: # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__( self ):
        return iter(self._iterator )
    def __getattr__( self , _ ):
        def empty_fn(*args , **kwargs ): # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        return self
    def __exit__( self , type_ , value , traceback ):
        return
class _tqdm_cls :
    def __call__( self , *args , **kwargs ):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled ( ):
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar ( ):
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bar ( ):
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
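# A minimal usage sketch for the module above (illustrative only; assumes it is
# importable as `transformers.utils.logging`):
#
#   from transformers.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger(__name__)
#   logger.info("verbosity is now INFO")
#   logger.warning_once("emitted a single time thanks to the lru_cache patch")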
| 240 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = ["""image_processor""", """tokenizer"""]
UpperCAmelCase_ : int = """OwlViTImageProcessor"""
UpperCAmelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __SCREAMING_SNAKE_CASE , )
lowerCAmelCase = kwargs.pop('''feature_extractor''' )
lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="max_length" , __SCREAMING_SNAKE_CASE="np" , **__SCREAMING_SNAKE_CASE ) ->int:
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or (isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(text[0] , __SCREAMING_SNAKE_CASE )):
lowerCAmelCase = [self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )]
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(text[0] , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = []
# Maximum number of queries across batch
lowerCAmelCase = max([len(__SCREAMING_SNAKE_CASE ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__SCREAMING_SNAKE_CASE ) != max_num_queries:
lowerCAmelCase = t + [''' '''] * (max_num_queries - len(__SCREAMING_SNAKE_CASE ))
lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
                encodings.append(encoding )
        else:
            raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
        if return_tensors == "np":
            input_ids = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
            attention_mask = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
        elif return_tensors == "jax" and is_flax_available():
            import jax.numpy as jnp
            input_ids = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
            attention_mask = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
        elif return_tensors == "pt" and is_torch_available():
            import torch
            input_ids = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
            attention_mask = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
        elif return_tensors == "tf" and is_tf_available():
            import tensorflow as tf
            input_ids = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
            attention_mask = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
        else:
            raise ValueError('''Target return tensor type could not be returned''' )
        encoding = BatchEncoding()
        encoding['''input_ids'''] = input_ids
        encoding['''attention_mask'''] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding['''query_pixel_values'''] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Optional[int]:
return self.image_processor.post_process(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Any:
return self.image_processor.post_process_object_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Tuple:
return self.image_processor.post_process_image_guided_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->str:
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor
| 338 | 0 |
import numpy as np
class UpperCamelCase__ :
'''simple docstring'''
    def __init__( self ) -> None:
        '''simple docstring'''
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
def __eq__( self : Optional[Any] ,lowerCamelCase__ : Any ) -> Optional[Any]:
'''simple docstring'''
return self.position == cell.position
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple:
'''simple docstring'''
print(self.position )
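# Cell bookkeeping for A*: `g` is the cost from the start, `h` the squared
# Euclidean distance heuristic to the goal, and `f = g + h` ranks the open set.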
class UpperCamelCase__ :
'''simple docstring'''
    def __init__( self , world_size=(5, 5) ) -> None:
        '''simple docstring'''
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
print(self.w )
    def get_neigbours( self , cell ):
        '''simple docstring'''
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c )
        return neighbours
def astar ( world , start , goal ) -> list:
    '''simple docstring'''
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
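# Control-flow sketch of astar above: pop the open node with the smallest f,
# stop once the goal is popped, then walk parent pointers back from the goal
# to reconstruct the path in start-to-goal order.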
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(F'''path from {start.position} to {goal.position}''')
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
print(world.w)
| 296 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'''vocab_file''': '''spiece.model'''}
lowercase__ : Optional[int] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
lowercase__ : Any = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
lowercase__ : Tuple = '''▁'''
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCAmelCase = (
AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE , normalized=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else mask_token
)
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) ->int:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Any:
if self.remove_space:
lowerCAmelCase = ''' '''.join(inputs.strip().split() )
else:
lowerCAmelCase = inputs
lowerCAmelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
lowerCAmelCase = unicodedata.normalize('''NFKD''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
lowerCAmelCase = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[str]:
lowerCAmelCase = self.preprocess_text(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
for piece in pieces:
if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase = cur_pieces[1:]
else:
lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__SCREAMING_SNAKE_CASE )
else:
new_pieces.append(__SCREAMING_SNAKE_CASE )
return new_pieces
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
lowerCAmelCase = []
lowerCAmelCase = ''''''
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 338 | 0 |
from __future__ import annotations
def max_sum_in_array ( array: list[int] , k: int ) -> int:
    if len(array ) < k or k < 0:
        raise ValueError("Invalid Input" )
    max_sum = sum(array[:k] )
    current_sum = max_sum
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
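# Worked example of the sliding window: for array = [1, 4, 2, 10, 2, 3, 1, 0, 20]
# and k = 4, the window sums are 17, 18, 17, 16, 6, 24, so the result is 24.
# Each step reuses the previous sum (drop array[i], add array[i + k]), giving
# O(n) time instead of the O(n * k) cost of recomputing every window.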
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 13 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = (DEISMultistepScheduler,)
UpperCAmelCase_ : int = (("""num_inference_steps""", 25),)
    def get_scheduler_config( self , **kwargs ) ->str:
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
        }
        config.update(**kwargs )
return config
    def check_over_configs( self , time_step=0 , **kwargs ) ->Tuple:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(output , t , sample , **kwargs ).prev_sample
                new_output = new_scheduler.step(new_output , t , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
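    # The helper above round-trips the scheduler config through save_config /
    # from_pretrained and checks that stepping the restored scheduler reproduces
    # the original outputs to within 1e-5.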
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
    def check_over_forward( self , time_step=0 , **kwargs ) ->List[Any]:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **kwargs ) ->List[Any]:
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**kwargs )
            scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ) ->Optional[int]:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            timestep_0 = scheduler.timesteps[5]
            timestep_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , timestep_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , timestep_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_switch( self ) ->int:
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
    def test_timesteps( self ) ->Optional[int]:
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_thresholding( self ) ->int:
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='''deis''' , solver_order=order , solver_type=solver_type , )
    def test_prediction_type( self ) ->Optional[Any]:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_solver_order_and_type( self ) ->Union[str, Any]:
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def test_lower_order_final( self ) ->List[Any]:
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def test_inference_steps( self ) ->List[Any]:
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def test_full_loop_no_noise( self ) ->Dict:
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
    def test_full_loop_with_v_prediction( self ) ->List[str]:
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
    def test_fp16_support( self ) ->Dict:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
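    # `dummy_sample_deter.half()` feeds float16 inputs through the full loop, so
    # the final sample staying float16 confirms the scheduler is fp16-safe.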
| 338 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class A :
'''simple docstring'''
    def __init__(self , num_of_nodes ) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}
    def add_edge (self , u_node , v_node , weight ) -> None:
        self.m_edges.append([u_node, v_node, weight] )
    def find_component (self , u_node ) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component (self , u_node ) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union (self , component_size , u_node , v_node ) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka (self ) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
        num_of_components = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
                u , v , w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
                            minimum_weight_edge[component] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
                    u , v , w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
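    # Boruvka's algorithm in brief: every round, each component records its
    # cheapest outgoing edge; merging along those edges at least halves the
    # component count, so the loop runs O(log V) rounds over the E edges.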
def __lowerCAmelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@property
    def dummy_uncond_unet( self ) ->List[str]:
        torch.manual_seed(0 )
        model = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
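    # A deliberately tiny UNet (32x32 samples, two down/up blocks) so the fast
    # test below runs on CPU in seconds; the seeded init keeps the expected
    # pixel values stable across runs.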
    def test_inference( self ) ->int:
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type='''numpy''' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type='''numpy''' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
    def test_karras_ve_pipeline( self ) ->List[Any]:
        model_id = '''google/ncsnpp-celebahq-256'''
        model = UNet2DModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type='''numpy''' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 338 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset ( IterableDataset ):
'''simple docstring'''
    def __init__( self, p_stop=0.01, max_length=1000 ) -> Dict:
        """simple docstring"""
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ) -> Optional[Any]:
        """simple docstring"""
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
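    # The dataset yields 0, 1, 2, ... and terminates randomly (probability
    # `p_stop` per item, hard cap `max_length`), so each epoch has an
    # unpredictable length -- exactly the case IterableDatasetShard must handle.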
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
    def check_batch_sampler_shards( self, batch_sampler, expected, split_batches=False, even_batches=True ) -> Optional[Any]:
        """simple docstring"""
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards], [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists, expected )
    def test_batch_sampler_shards_with_no_splits( self ) -> Optional[int]:
"""simple docstring"""
# Check the shards when the dataset is a round multiple of total batch size.
UpperCamelCase__ : str = BatchSampler(range(24 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = BatchSampler(range(24 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCamelCase__ : str = BatchSampler(range(21 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = BatchSampler(range(21 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
UpperCamelCase__ : Optional[Any] = BatchSampler(range(22 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = BatchSampler(range(22 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
UpperCamelCase__ : Dict = BatchSampler(range(20 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = BatchSampler(range(20 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
UpperCamelCase__ : int = BatchSampler(range(2 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = BatchSampler(range(2 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
    def test_batch_sampler_shards_with_splits( self ) -> Tuple:
"""simple docstring"""
# Check the shards when the dataset is a round multiple of batch size.
UpperCamelCase__ : Optional[int] = BatchSampler(range(24 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = BatchSampler(range(24 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCamelCase__ : Optional[Any] = BatchSampler(range(22 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = BatchSampler(range(22 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCamelCase__ : Tuple = BatchSampler(range(21 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = BatchSampler(range(21 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
UpperCamelCase__ : Optional[int] = BatchSampler(range(2 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = BatchSampler(range(2 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = [[], []]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE )
    def test_batch_sampler_shards_with_no_splits_no_even( self ) -> str:
"""simple docstring"""
# Check the shards when the dataset is a round multiple of total batch size.
UpperCamelCase__ : List[str] = BatchSampler(range(24 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = BatchSampler(range(24 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCamelCase__ : List[str] = BatchSampler(range(21 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = BatchSampler(range(21 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
UpperCamelCase__ : int = BatchSampler(range(22 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = BatchSampler(range(22 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
UpperCamelCase__ : List[str] = BatchSampler(range(20 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = BatchSampler(range(20 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
UpperCamelCase__ : Any = BatchSampler(range(2 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = [[[0, 1]], []]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = BatchSampler(range(2 ), batch_size=3, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
    def test_batch_sampler_shards_with_splits_no_even( self ) -> int:
"""simple docstring"""
# Check the shards when the dataset is a round multiple of batch size.
UpperCamelCase__ : List[Any] = BatchSampler(range(24 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = BatchSampler(range(24 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
# Expected shouldn't change
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCamelCase__ : Union[str, Any] = BatchSampler(range(22 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = BatchSampler(range(22 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCamelCase__ : Any = BatchSampler(range(21 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = BatchSampler(range(21 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
# Check the shards when the dataset is very small.
UpperCamelCase__ : int = BatchSampler(range(2 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = [[[0, 1]], []]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = BatchSampler(range(2 ), batch_size=4, drop_last=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = [[], []]
self.check_batch_sampler_shards(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, split_batches=__SCREAMING_SNAKE_CASE, even_batches=__SCREAMING_SNAKE_CASE )
    def test_batch_sampler_with_varying_batch_sizes(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
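
# A minimal usage sketch of `skip_first_batches`, the helper exercised above, e.g.
# for resuming a training loop mid-epoch. The dataset and batch size here are
# illustrative assumptions, not part of the test suite.
if __name__ == "__main__":
    demo_dataloader = DataLoader(list(range(16)), batch_size=4)
    # Yields only the batches after the first two: [8..11] then [12..15]
    for batch in skip_first_batches(demo_dataloader, num_batches=2):
        print(batch.tolist())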
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase_ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline using any `ModelWithLMHead`."""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
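
# A minimal usage sketch of this pipeline; the checkpoint name below is an
# illustrative assumption (any masked-LM checkpoint with a mask token works).
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base")
    # Each prediction is a dict with "score", "token", "token_str" and "sequence".
    for prediction in unmasker("The capital of France is <mask>.", top_k=3):
        print(prediction["sequence"], round(prediction["score"], 4))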
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
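
# A minimal sketch of instantiating the config; the reduced layer counts are
# illustrative assumptions, not Whisper defaults.
if __name__ == "__main__":
    config = WhisperConfig(encoder_layers=2, decoder_layers=2, d_model=128)
    # "hidden_size" resolves to d_model through `attribute_map`.
    print(config.model_type, config.hidden_size)  # whisper 128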
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Fine-tuning the library models for multiple choice."""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
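
# Shape sketch for the collator above (an illustrative trace, not executed here):
# with 2 examples of 4 choices each padded to length 7, `__call__` returns
# "input_ids"/"attention_mask" tensors of shape (2, 4, 7) plus a (2,)-shaped
# "labels" tensor, which is exactly what AutoModelForMultipleChoice expects.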
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
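
# A typical invocation, as a sketch (checkpoint, paths and hyper-parameters are
# illustrative assumptions, not defaults baked into this script):
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --output_dir /tmp/swag_output \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3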
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
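
# A quick round-trip sanity check (a sketch; plaintext and key are arbitrary):
#
#     ciphertext = encrypt_message("LEMON", "Attack at dawn")  # -> "Lxfopv ef rnhr"
#     assert decrypt_message("LEMON", ciphertext) == "Attack at dawn"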
""" Testing suite for the PyTorch Bit model. """
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
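
# To run just this module locally (the path follows the usual transformers test
# layout, assumed here):
#
#   python -m pytest tests/models/bit/test_modeling_bit.py -q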
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f'{solution() = }')
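
# Worked example (a sketch): the lamina with outer_width=3 and hole_width=1 uses
# 3*3 - 1*1 = 8 tiles, and no other lamina uses exactly 8 tiles, so t=8 has
# L(8) = 1 and is therefore counted by solution().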
| 338 | 0 |
""" RWKV configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
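
if __name__ == "__main__":
    # A minimal sketch: the sizes below are illustrative assumptions, far smaller
    # than any released RWKV checkpoint.
    config = RwkvConfig(hidden_size=256, num_hidden_layers=4)
    print(config.attention_hidden_size)    # defaults to hidden_size -> 256
    print(config.max_position_embeddings)  # mapped to context_length -> 1024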
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Optional[Any]:
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase = TextDatasetReader({'''train''': text_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
lowerCAmelCase = tmp_path / '''cache'''
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = features.copy() if features else default_expected_features
lowerCAmelCase = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase = TextDatasetReader({'''train''': text_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any:
if split:
lowerCAmelCase = {split: text_path}
else:
lowerCAmelCase = '''train'''
lowerCAmelCase = {'''train''': text_path, '''test''': text_path}
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
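

# Added usage sketch (not part of the original test module): writing a 4-line
# file and reading it back exercises the same reader path the tests above rely on.
def example_text_read(tmp_path):
    sample = tmp_path / "sample.txt"
    sample.write_text("foo\nbar\nbaz\nqux\n")
    dataset = TextDatasetReader(str(sample), cache_dir=str(tmp_path / "cache")).read()
    _check_text_dataset(dataset, {"text": "string"})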
| 338 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Any = '''Hello, World!'''
lowerCamelCase : int = '''en_XX'''
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : int ):
    data_dir = Path("""data_bin""" )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(snake_case__ ).parent ),
        checkpoint_file=Path(snake_case__ ).name,
        _name="""xmod_base""",
        arch="""xmod_base""",
        task="""multilingual_masked_lm""",
        data_name_or_path=str(data_dir ),
        bpe="""sentencepiece""",
        sentencepiece_model=str(Path(snake_case__ ).parent / """sentencepiece.bpe.model""" ),
        src_dict=str(data_dir / """dict.txt""" ),
    )
xmod.eval() # disable dropout
print(snake_case__ )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
if classification_head:
__lowercase : Union[str, Any] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , snake_case__ )
__lowercase : int = XmodForSequenceClassification(snake_case__ ) if classification_head else XmodForMaskedLM(snake_case__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowercase : List[Any] = xmod_sent_encoder.embed_tokens.weight
__lowercase : Dict = xmod_sent_encoder.embed_positions.weight
__lowercase : int = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowercase : Tuple = xmod_sent_encoder.layernorm_embedding.weight
__lowercase : Optional[Any] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowercase : Tuple = model.roberta.encoder.layer[i]
__lowercase : int = xmod_sent_encoder.layers[i]
# self attention
__lowercase : Optional[Any] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
__lowercase : Optional[Any] = xmod_layer.self_attn.q_proj.weight
__lowercase : Optional[int] = xmod_layer.self_attn.q_proj.bias
__lowercase : List[Any] = xmod_layer.self_attn.k_proj.weight
__lowercase : Union[str, Any] = xmod_layer.self_attn.k_proj.bias
__lowercase : Optional[int] = xmod_layer.self_attn.v_proj.weight
__lowercase : Tuple = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowercase : List[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
__lowercase : List[str] = xmod_layer.self_attn.out_proj.weight
__lowercase : Optional[Any] = xmod_layer.self_attn.out_proj.bias
__lowercase : Optional[int] = xmod_layer.self_attn_layer_norm.weight
__lowercase : Union[str, Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowercase : int = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
__lowercase : Union[str, Any] = xmod_layer.fca.weight
__lowercase : Dict = xmod_layer.fca.bias
# output
__lowercase : Optional[Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
__lowercase : str = xmod_layer.fca.weight
__lowercase : List[Any] = xmod_layer.fca.bias
__lowercase : Any = xmod_layer.final_layer_norm.weight
__lowercase : List[str] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowercase : str = xmod_layer.adapter_layer_norm.weight
__lowercase : Union[str, Any] = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowercase : str = bert_output.adapter_modules[lang_code]
__lowercase : Any = xmod_layer.adapter_modules[lang_code]
__lowercase : Optional[int] = from_adapter.fca.weight
__lowercase : Any = from_adapter.fca.bias
__lowercase : Union[str, Any] = from_adapter.fca.weight
__lowercase : List[str] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowercase : Optional[Any] = xmod_sent_encoder.layer_norm.weight
__lowercase : int = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowercase : Union[str, Any] = xmod.model.classification_heads["""mnli"""].dense.weight
__lowercase : Union[str, Any] = xmod.model.classification_heads["""mnli"""].dense.bias
__lowercase : Optional[int] = xmod.model.classification_heads["""mnli"""].out_proj.weight
__lowercase : Optional[Any] = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
__lowercase : Union[str, Any] = xmod.model.encoder.lm_head.dense.weight
__lowercase : List[Any] = xmod.model.encoder.lm_head.dense.bias
__lowercase : str = xmod.model.encoder.lm_head.layer_norm.weight
__lowercase : Optional[int] = xmod.model.encoder.lm_head.layer_norm.bias
__lowercase : Optional[Any] = xmod.model.encoder.lm_head.weight
__lowercase : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowercase : str = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case__ )
__lowercase : Any = model(snake_case__ )[0]
if classification_head:
__lowercase : List[str] = xmod.model.classification_heads["""mnli"""](xmod.extract_features(snake_case__ ) )
else:
__lowercase : List[Any] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowercase : int = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__lowercase : Tuple = torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
lowerCamelCase : List[Any] = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
) | 233 | def decimal_to_binary(num) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class a__( unittest.TestCase ):
def lowercase_ ( self : Any ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=__SCREAMING_SNAKE_CASE , )
assert hasattr(self , 'env' )
def lowercase_ ( self : Any , __snake_case : Any ):
a : Optional[Any] = F"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
a : Optional[int] = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__SCREAMING_SNAKE_CASE , instance_count=__SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=__SCREAMING_SNAKE_CASE , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__SCREAMING_SNAKE_CASE , py_version='py36' , )
def lowercase_ ( self : Optional[Any] , __snake_case : Union[str, Any] ):
TrainingJobAnalytics(__SCREAMING_SNAKE_CASE ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def lowercase_ ( self : List[str] , __snake_case : Tuple ):
# create estimator
a : int = self.create_estimator(__SCREAMING_SNAKE_CASE )
# run training
estimator.fit()
# result dataframe
a : int = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
a : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
a : str = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
a : int = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __SCREAMING_SNAKE_CASE ) | 297 | class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(items, max_cost, key_func):
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
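

# Added usage sketch (the menu values are made up): pick items greedily by
# value-to-weight ratio under a max weight budget of 15.
def example_greedy_run():
    menu = build_menu(["burger", "salad", "soda"], [80, 40, 10], [10, 5, 1])
    chosen, total_value = greedy(menu, 15, Things.value_weight)
    return chosen, total_value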
def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
snake_case : Union[str, Any] = data_utils.TransfoXLTokenizer
snake_case : Tuple = data_utils.TransfoXLCorpus
snake_case : Union[str, Any] = data_utils
snake_case : Union[str, Any] = data_utils
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : str ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case__ , 'rb' ) as fp:
a__ = pickle.load(snake_case__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
a__ = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
a__ = corpus.vocab.__dict__
torch.save(snake_case__ , snake_case__ )
a__ = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , snake_case__ )
a__ = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(snake_case__ , snake_case__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
a__ = os.path.abspath(snake_case__ )
a__ = os.path.abspath(snake_case__ )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
a__ = TransfoXLConfig()
else:
a__ = TransfoXLConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
a__ = TransfoXLLMHeadModel(snake_case__ )
a__ = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
a__ = os.path.join(snake_case__ , snake_case__ )
a__ = os.path.join(snake_case__ , snake_case__ )
print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' )
torch.save(model.state_dict() , snake_case__ )
print(F'Save configuration file to {os.path.abspath(snake_case__ )}' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
snake_case : str = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
snake_case : Optional[int] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 240 | import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc_young = [0, 25, 50]
    abc_middle_aged = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc_young)
    middle_aged = fuzz.membership.trimf(X, abc_middle_aged)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
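    # Added sketch (toy relations, not from the original script): the two
    # compositions announced above, computed with plain numpy so that
    # T[i, j] = max_k min(R[i, k], S[k, j]) for max-min, and
    # T[i, j] = max_k (R[i, k] * S[k, j]) for max-product.
    R = np.array([[0.6, 0.3], [0.2, 0.9]])
    S = np.array([[1.0, 0.5, 0.3], [0.8, 0.4, 0.7]])
    T_maxmin = np.max(np.minimum(R[:, :, None], S[None, :, :]), axis=1)
    T_maxprod = np.max(R[:, :, None] * S[None, :, :], axis=1)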
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 338 | 0 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE ,scheduler=__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=__SCREAMING_SNAKE_CASE ,output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=__SCREAMING_SNAKE_CASE ,output_type="""numpy""" ,return_dict=__SCREAMING_SNAKE_CASE )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """google/ncsnpp-celebahq-256"""
SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE ,scheduler=__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 ,generator=__SCREAMING_SNAKE_CASE ,output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 296 | import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Optional[Any]:
lowerCAmelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
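    # Added reference sketch (standard DDPM posterior-variance formula, stated as
    # an assumption to document the constants asserted above; unused by the suite):
    def reference_variance(self, t, num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02):
        betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
        alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
        alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
        # variance_t = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t); 0 at t == 0
        return betas[t] * (1 - alpha_prod_t_prev) / (1 - alphas_cumprod[t])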
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
if i == len(__SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase = -1
else:
lowerCAmelCase = timesteps[i + 1]
lowerCAmelCase = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [100, 87, 50, 1, 0]
lowerCAmelCase = len(__SCREAMING_SNAKE_CASE )
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __SCREAMING_SNAKE_CASE , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
| 338 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = torch.load(snake_case__ , map_location="cpu" )
if "model" in sd.keys():
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.load(snake_case__ , map_location="cpu" )["model"]
# pop unnecessary weights
SCREAMING_SNAKE_CASE_: List[str] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case__ )
SCREAMING_SNAKE_CASE_: List[str] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE_: Optional[int] = sd.pop(snake_case__ )
SCREAMING_SNAKE_CASE_: int = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE_: Union[str, Any] = sd[key]
# We split QKV in separate Q,K,V
SCREAMING_SNAKE_CASE_: Optional[Any] = key.replace(".qkv_proj." , ".q_proj." )
SCREAMING_SNAKE_CASE_: Optional[int] = key.replace(".qkv_proj." , ".k_proj." )
SCREAMING_SNAKE_CASE_: str = key.replace(".qkv_proj." , ".v_proj." )
SCREAMING_SNAKE_CASE_: Tuple = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.split(snake_case__ , depth // 3 , dim=0 )
SCREAMING_SNAKE_CASE_: Optional[Any] = q
SCREAMING_SNAKE_CASE_: Any = k
SCREAMING_SNAKE_CASE_: List[str] = v
del sd[key]
return sd
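

# Added illustration (synthetic tensor, not from a real checkpoint): the qkv
# branch above slices a fused (3*hidden, hidden) projection into equal Q/K/V
# thirds along dim 0, exactly what torch.split does here.
def _demo_qkv_split(hidden=4):
    fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert q.shape == k.shape == v.shape == (hidden, hidden)
    return q, k, v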
@torch.no_grad()
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
SCREAMING_SNAKE_CASE_: Union[str, Any] = load_checkpoint(snake_case__ )
if config is not None:
SCREAMING_SNAKE_CASE_: Tuple = OPTConfig.from_pretrained(snake_case__ )
else:
SCREAMING_SNAKE_CASE_: int = OPTConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] = OPTModel(snake_case__ ).half().eval()
model.load_state_dict(snake_case__ )
# Check results
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
lowerCAmelCase : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 13 | import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase__ : str = logging.get_logger(__name__)
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = """AutoTokenizer"""
UpperCAmelCase_ : Optional[int] = ["""tokenizer"""]
UpperCAmelCase_ : str = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
super().__init__(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = speaker_embeddings
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , **__SCREAMING_SNAKE_CASE ) ->Tuple:
if speaker_embeddings_dict_path is not None:
            lowerCAmelCase = get_file_from_repo(
                __SCREAMING_SNAKE_CASE,
                __SCREAMING_SNAKE_CASE,
                subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ),
                cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ),
                force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ),
                proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ),
                resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ),
                local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ),
                use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ),
                revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ),
            )
if speaker_embeddings_path is None:
logger.warning(
F"`{os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
lowerCAmelCase = None
else:
with open(__SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
lowerCAmelCase = json.load(__SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = None
lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return cls(tokenizer=__SCREAMING_SNAKE_CASE , speaker_embeddings=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
lowerCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" )
lowerCAmelCase = tmp_dict
with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE ) ->List[str]:
lowerCAmelCase = self.speaker_embeddings[voice_preset]
lowerCAmelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
lowerCAmelCase = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , __SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('''cache_dir''' , __SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('''force_download''' , __SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('''proxies''' , __SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('''resume_download''' , __SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('''local_files_only''' , __SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('''use_auth_token''' , __SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('''revision''' , __SCREAMING_SNAKE_CASE ) , )
if path is None:
raise ValueError(
F"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE )
return voice_preset_dict
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE = None ) ->Tuple:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="pt" , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , **__SCREAMING_SNAKE_CASE , ) ->int:
if voice_preset is not None and not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
else:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('''.npz''' ):
lowerCAmelCase = voice_preset + '''.npz'''
lowerCAmelCase = np.load(__SCREAMING_SNAKE_CASE )
if voice_preset is not None:
self._validate_voice_preset_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCAmelCase = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
if voice_preset is not None:
lowerCAmelCase = voice_preset
return encoded_text
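

# Added usage sketch: BarkProcessor, the "suno/bark-small" checkpoint and the
# preset name are assumptions about the public API, not taken from the code above.
def _example_bark_usage():
    from transformers import BarkProcessor

    processor = BarkProcessor.from_pretrained("suno/bark-small")
    return processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")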
| 338 | 0 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A ( UpperCamelCase_ ):
'''simple docstring'''
A = (CMStochasticIterativeScheduler,)
A = 1_0
def a_ (self , **_UpperCAmelCase ) -> str:
__UpperCamelCase : Any = {
"num_train_timesteps": 2_0_1,
"sigma_min": 0.002,
"sigma_max": 8_0.0,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def a_ (self ) -> List[Any]:
__UpperCamelCase : List[str] = 1_0
__UpperCamelCase : int = self.get_scheduler_config()
__UpperCamelCase : Any = self.scheduler_classes[0](**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
__UpperCamelCase : List[Any] = scheduler.timesteps[0]
__UpperCamelCase : Union[str, Any] = scheduler.timesteps[1]
__UpperCamelCase : int = self.dummy_sample
__UpperCamelCase : Union[str, Any] = 0.1 * sample
__UpperCamelCase : Tuple = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
__UpperCamelCase : List[str] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def a_ (self ) -> Any:
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def a_ (self ) -> str:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=__SCREAMING_SNAKE_CASE )
def a_ (self ) -> str:
__UpperCamelCase : Dict = self.scheduler_classes[0]
__UpperCamelCase : List[str] = self.get_scheduler_config()
__UpperCamelCase : int = scheduler_class(**__SCREAMING_SNAKE_CASE )
__UpperCamelCase : List[Any] = 1
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
__UpperCamelCase : Union[str, Any] = scheduler.timesteps
__UpperCamelCase : Optional[Any] = torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] = self.dummy_model()
__UpperCamelCase : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(__SCREAMING_SNAKE_CASE ):
# 1. scale model input
__UpperCamelCase : Union[str, Any] = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict noise residual
__UpperCamelCase : List[Any] = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
__UpperCamelCase : str = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
__UpperCamelCase : str = pred_prev_sample
__UpperCamelCase : List[Any] = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__UpperCamelCase : Optional[Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1E-2
assert abs(result_mean.item() - 0.2_510 ) < 1E-3
def a_ (self ) -> str:
__UpperCamelCase : List[str] = self.scheduler_classes[0]
__UpperCamelCase : Union[str, Any] = self.get_scheduler_config()
__UpperCamelCase : str = scheduler_class(**__SCREAMING_SNAKE_CASE )
__UpperCamelCase : Any = [1_0_6, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
__UpperCamelCase : Any = scheduler.timesteps
__UpperCamelCase : Dict = torch.manual_seed(0 )
__UpperCamelCase : Any = self.dummy_model()
__UpperCamelCase : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
__UpperCamelCase : Tuple = scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict noise residual
__UpperCamelCase : str = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
__UpperCamelCase : Optional[int] = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
__UpperCamelCase : Optional[int] = pred_prev_sample
__UpperCamelCase : Optional[int] = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__UpperCamelCase : Optional[int] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1E-2
assert abs(result_mean.item() - 0.4_527 ) < 1E-3
def a_ (self ) -> List[Any]:
__UpperCamelCase : Union[str, Any] = self.scheduler_classes[0]
__UpperCamelCase : Optional[int] = self.get_scheduler_config()
__UpperCamelCase : str = scheduler_class(**__SCREAMING_SNAKE_CASE )
__UpperCamelCase : Optional[Any] = [3_9, 3_0, 1_2, 1_5, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
def a_ (self ) -> Dict:
__UpperCamelCase : Union[str, Any] = self.scheduler_classes[0]
__UpperCamelCase : Any = self.get_scheduler_config()
__UpperCamelCase : Union[str, Any] = scheduler_class(**__SCREAMING_SNAKE_CASE )
__UpperCamelCase : Optional[Any] = [3_9, 3_0, 1_2, 1, 0]
__UpperCamelCase : Optional[int] = len(__SCREAMING_SNAKE_CASE )
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
def a_ (self ) -> Tuple:
__UpperCamelCase : Optional[Any] = self.scheduler_classes[0]
__UpperCamelCase : str = self.get_scheduler_config()
__UpperCamelCase : Tuple = scheduler_class(**__SCREAMING_SNAKE_CASE )
__UpperCamelCase : Optional[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __SCREAMING_SNAKE_CASE , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
| 298 | import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 338 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
UpperCAmelCase_ = ['''gpt2''']
UpperCAmelCase_ = '''gpt2'''
if is_tf_available():
class lowercase__ ( tf.Module ):
'''simple docstring'''
def __init__( self, __magic_name__ ) -> str:
"""simple docstring"""
super().__init__()
UpperCamelCase__ : Dict = tokenizer
UpperCamelCase__ : Union[str, Any] = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = TFGPTaLMHeadModel.from_config(__SCREAMING_SNAKE_CASE )
@tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='''text''' ),) )
def UpperCamelCase__ ( self, __magic_name__ ) -> int:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.tokenizer(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = tokenized['''input_ids'''].to_tensor()
UpperCamelCase__ : Optional[Any] = tf.cast(input_ids_dense > 0, tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
UpperCamelCase__ : int = self.model(input_ids=__SCREAMING_SNAKE_CASE, attention_mask=__SCREAMING_SNAKE_CASE )['''logits''']
return outputs
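
    # Added illustration (toy ragged tensor, not from the tests): how the serving
    # method above densifies ragged token ids and derives an attention mask.
    def demo_ragged_to_dense():
        ragged = tf.ragged.constant([[1, 2, 3], [4, 5]])
        dense = ragged.to_tensor()  # pads shorter rows with 0
        mask = tf.cast(dense > 0, tf.int32)  # 1 wherever a real token id sits
        return dense, mask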
@require_tf
@require_keras_nlp
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
super().setUp()
UpperCamelCase__ : Any = [GPTaTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
UpperCamelCase__ : List[Any] = [TFGPTaTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCamelCase__ : Optional[int] = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
UpperCamelCase__ : Dict = list(zip(self.test_sentences, self.test_sentences[::-1] ) )
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers ):
for test_inputs in self.test_sentences:
UpperCamelCase__ : Union[str, Any] = tokenizer([test_inputs], return_tensors='''tf''' )
UpperCamelCase__ : int = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
UpperCamelCase__ : Union[str, Any] = python_outputs[key].numpy()
UpperCamelCase__ : str = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__SCREAMING_SNAKE_CASE, tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ : Union[str, Any] = tf.function(__SCREAMING_SNAKE_CASE )
for test_inputs in self.test_sentences:
UpperCamelCase__ : str = tf.constant(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = compiled_tokenizer(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = tf_tokenizer(__SCREAMING_SNAKE_CASE )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ : int = ModelToSave(tokenizer=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCamelCase__ : str = model.serving(__SCREAMING_SNAKE_CASE ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCamelCase__ : List[str] = Path(__SCREAMING_SNAKE_CASE ) / '''saved.model'''
tf.saved_model.save(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, signatures={'''serving_default''': model.serving} )
UpperCamelCase__ : Union[str, Any] = tf.saved_model.load(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = loaded_model.signatures['''serving_default'''](__SCREAMING_SNAKE_CASE )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
UpperCamelCase__ : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCamelCase__ : Any = tf_tokenizer(__SCREAMING_SNAKE_CASE ) # Build model with some sample inputs
UpperCamelCase__ : Optional[Any] = tf_tokenizer.get_config()
UpperCamelCase__ : Optional[Any] = TFGPTaTokenizer.from_config(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = model_from_config(__SCREAMING_SNAKE_CASE )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
UpperCamelCase__ : Optional[int] = 123123
for max_length in [3, 5, 1024]:
UpperCamelCase__ : List[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
UpperCamelCase__ : Any = tf_tokenizer(__SCREAMING_SNAKE_CASE, max_length=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 201 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase__ : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowercase__ : Dict = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
lowerCAmelCase = self.transformer_dir
shutil.copy(
os.path.join(__SCREAMING_SNAKE_CASE , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Union[str, Any]:
lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
lowerCAmelCase = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(__SCREAMING_SNAKE_CASE , '''w''' , newline='''\n''' ) as f:
f.write(__SCREAMING_SNAKE_CASE )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , '''r''' ) as f:
self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
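
# Illustrative sketch (added; not part of the original test file): the marker that
# check_copies.is_copy_consistent() enforces looks like this inside a model file.
# The class below is hypothetical.
#
#   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
#   class TestModelLMPredictionHead(nn.Module):
#       ...  # the body must stay identical to the Bert original, modulo the Bert->TestModel rename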
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
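
# Note (added for illustration): with the toy vocab above, WordPiece tokenization splits an
# out-of-vocabulary word into greedy longest-match pieces, e.g. "unwanted" -> ["un", "##want", "##ed"],
# which map to ids [7, 4, 5] in the vocab file written by setUp().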
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
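
# Minimal usage sketch (added for illustration; mirrors what the tests above exercise):
#
#   from datasets.splits import SplitDict, SplitInfo
#
#   splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
#   yaml_list = splits._to_yaml_list()             # list of plain dicts, no "dataset_name" field
#   reloaded = SplitDict._from_yaml_list(yaml_list)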
'''simple docstring'''
import random
def random_graph(nodes_number: int, probability: float, directed: bool = False) -> dict:
    graph = {i: [] for i in range(nodes_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(nodes_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(nodes_number):
        for j in range(i + 1, nodes_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(nodes_number: int) -> dict:
    return {
        i: [j for j in range(nodes_number) if i != j] for i in range(nodes_number)
    }
if __name__ == "__main__":
    import doctest

    doctest.testmod()
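
# Example (added for illustration): an undirected graph on 5 nodes with edge probability 0.5.
# Each of the 10 possible undirected edges is kept independently, so the expected edge count is 5.
#
#   random.seed(1)
#   graph = random_graph(5, 0.5)   # adjacency lists; the exact edges depend on the seed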
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
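
# Mathematical note (added for context): for the block matrix M = [[A, B], [B^T, C]] with
# invertible A, the classical identity det(M) = det(A) * det(C - B^T A^(-1) B) = det(A) * det(S)
# holds, where S is exactly the Schur complement returned above. The first unit test below
# verifies this identity numerically.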
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
_KWARGS_DESCRIPTION = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
import argparse
import io
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
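
# Example (added for illustration): under WHISPER_MAPPING above, an original OpenAI key such as
# "decoder.blocks.0.mlp.0.weight" is rewritten to "decoder.layers.0.fc1.weight".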
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # root defaults to the current directory here so the one-argument call below stays
    # valid; the original default was garbled in this copy.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # wrap the downloaded bytes in a buffer for torch.load; the original one-liner was garbled in this copy
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions["n_mels"], d_model=dimensions["n_audio_state"], max_target_positions=dimensions["n_text_ctx"], encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_state"], max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
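
    # Example invocation sketch (the script file name is assumed; adjust to where this file lives):
    #   python convert_openai_whisper_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny
    # A short name such as "tiny" is resolved through _MODELS and downloaded; a local *.pt path is loaded directly.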
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
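
# Usage note (added for illustration): with the _LazyModule pattern above, the torch-backed
# classes listed in _import_structure are only materialized on first attribute access, e.g.
#
#   from transformers import Blip2ForConditionalGeneration  # resolved lazily at this point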
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
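
# Minimal usage sketch (added for illustration; the checkpoint name and array shapes are assumptions):
#
#   import numpy as np
#   from transformers import TvltProcessor
#
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   video = list(np.random.rand(8, 3, 224, 224))   # 8 RGB frames
#   audio = np.random.rand(16_000)                 # roughly 1 s of mono audio
#   inputs = processor(images=video, audio=audio, sampling_rate=16_000, mask_audio=True)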
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"--nproc_per_node=2\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()}\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None


def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
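
# Worked example (added for illustration): selection sort swaps the minimum of the unsorted
# suffix into place on each pass.
#   [64, 25, 12, 22, 11] -> [11, 25, 12, 22, 64] -> [11, 12, 25, 22, 64]
#   -> [11, 12, 22, 25, 64] -> done (the final pass finds 25 already in place).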
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_main_input_name(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
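
# Note (added for context): per the shape assertions in the tester above, EsmForProteinFolding
# returns "positions" of shape (8, batch, seq_len, 14, 3), i.e. 14 atom coordinates per residue,
# and the slice checked here is the first atom of the first residue of the first output block.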
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
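
# Usage sketch (added for illustration):
#
#   from transformers import GPTNeoXTokenizerFast
#
#   tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tok("hello world")["input_ids"]
#   text = tok.decode(ids)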
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = ["""image_processor""", """tokenizer"""]
UpperCAmelCase_ : int = """OwlViTImageProcessor"""
UpperCAmelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __SCREAMING_SNAKE_CASE , )
lowerCAmelCase = kwargs.pop('''feature_extractor''' )
lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="max_length" , __SCREAMING_SNAKE_CASE="np" , **__SCREAMING_SNAKE_CASE ) ->int:
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or (isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and not isinstance(text[0] , __SCREAMING_SNAKE_CASE )):
lowerCAmelCase = [self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )]
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(text[0] , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = []
# Maximum number of queries across batch
lowerCAmelCase = max([len(__SCREAMING_SNAKE_CASE ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__SCREAMING_SNAKE_CASE ) != max_num_queries:
lowerCAmelCase = t + [''' '''] * (max_num_queries - len(__SCREAMING_SNAKE_CASE ))
lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
encodings.append(__SCREAMING_SNAKE_CASE )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
lowerCAmelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowerCAmelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowerCAmelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
lowerCAmelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowerCAmelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
lowerCAmelCase = BatchEncoding()
lowerCAmelCase = input_ids
lowerCAmelCase = attention_mask
if query_images is not None:
lowerCAmelCase = BatchEncoding()
lowerCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).pixel_values
lowerCAmelCase = query_pixel_values
if images is not None:
lowerCAmelCase = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is not None and images is not None:
lowerCAmelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE ) , tensor_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Optional[int]:
return self.image_processor.post_process(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Any:
return self.image_processor.post_process_object_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->Tuple:
return self.image_processor.post_process_image_guided_detection(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->str:
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor
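# --- Illustrative sketch, not part of the original module -------------------
# The nested-text branch of `__call__` pads every sample to the batch-wide
# maximum number of queries before tokenizing. Just that padding step, in
# isolation, with made-up query lists:
def _demo_pad_query_batch(text):
    max_num_queries = max(len(t) for t in text)
    return [t + [" "] * (max_num_queries - len(t)) for t in text]

# _demo_pad_query_batch([["a cat"], ["a dog", "a remote"]])
# -> [["a cat", " "], ["a dog", "a remote"]]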
| 338 | 0 |
import unittest
import numpy as np
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , ) -> np.ndarray:
    """Compute the Schur complement S = C - B.T @ inv(A) @ B of the block matrix [[A, B], [B.T, C]]."""
SCREAMING_SNAKE_CASE = np.shape(snake_case__ )
SCREAMING_SNAKE_CASE = np.shape(snake_case__ )
SCREAMING_SNAKE_CASE = np.shape(snake_case__ )
if shape_a[0] != shape_b[0]:
SCREAMING_SNAKE_CASE = (
"""Expected the same number of rows for A and B. """
F"""Instead found A of size {shape_a} and B of size {shape_b}"""
)
raise ValueError(snake_case__ )
if shape_b[1] != shape_c[1]:
SCREAMING_SNAKE_CASE = (
"""Expected the same number of columns for B and C. """
F"""Instead found B of size {shape_b} and C of size {shape_c}"""
)
raise ValueError(snake_case__ )
SCREAMING_SNAKE_CASE = pseudo_inv
if a_inv is None:
try:
SCREAMING_SNAKE_CASE = np.linalg.inv(snake_case__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
SCREAMING_SNAKE_CASE = np.array([[0, 3], [3, 0], [2, 3]] )
SCREAMING_SNAKE_CASE = np.array([[2, 1], [6, 3]] )
SCREAMING_SNAKE_CASE = schur_complement(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = np.block([[a, b], [b.T, c]] )
SCREAMING_SNAKE_CASE = np.linalg.det(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = np.linalg.det(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = np.linalg.det(__SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(__SCREAMING_SNAKE_CASE ,det_a * det_s )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
SCREAMING_SNAKE_CASE = np.array([[0, 3], [3, 0], [2, 3]] )
SCREAMING_SNAKE_CASE = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
schur_complement(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
SCREAMING_SNAKE_CASE = np.array([[0, 3], [3, 0], [2, 3]] )
SCREAMING_SNAKE_CASE = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
schur_complement(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
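# --- Illustrative sketch, not part of the original module -------------------
# The identity the tests above rely on, checked once with concrete numbers:
# det([[A, B], [B.T, C]]) == det(A) * det(S), where S is the Schur complement.
def _demo_schur_determinant_identity() -> bool:
    a = np.array([[2.0, 0.0], [0.0, 3.0]])
    b = np.array([[1.0], [1.0]])
    c = np.array([[4.0]])
    s = c - b.T @ np.linalg.inv(a) @ b  # Schur complement of A, here 19/6
    m = np.block([[a, b], [b.T, c]])
    return bool(np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s)))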
| 296 | import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {'''vocab_file''': '''spiece.model'''}
lowercase__ : Optional[int] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
lowercase__ : Any = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
lowercase__ : Tuple = '''▁'''
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = VOCAB_FILES_NAMES
UpperCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="[SEP]" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="[CLS]" , __SCREAMING_SNAKE_CASE="[MASK]" , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) ->None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCAmelCase = (
AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE , normalized=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else mask_token
)
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = remove_space
lowerCAmelCase = keep_accents
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) ->int:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Any:
if self.remove_space:
lowerCAmelCase = ''' '''.join(inputs.strip().split() )
else:
lowerCAmelCase = inputs
lowerCAmelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
lowerCAmelCase = unicodedata.normalize('''NFKD''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = ''''''.join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
lowerCAmelCase = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->List[str]:
lowerCAmelCase = self.preprocess_text(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
for piece in pieces:
if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase = cur_pieces[1:]
else:
lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__SCREAMING_SNAKE_CASE )
else:
new_pieces.append(__SCREAMING_SNAKE_CASE )
return new_pieces
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->int:
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
lowerCAmelCase = []
lowerCAmelCase = ''''''
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) ->Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
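# --- Illustrative sketch, not part of the original module -------------------
# The pair layout built above is `[CLS] A [SEP] B [SEP]`. A tiny stand-alone
# check of the corresponding special-tokens mask and token-type ids, using
# arbitrary placeholder id lists:
def _demo_pair_layout(token_ids_a, token_ids_b):
    special_tokens_mask = [1] + [0] * len(token_ids_a) + [1] + [0] * len(token_ids_b) + [1]
    token_type_ids = [0] * (len(token_ids_a) + 2) + [1] * (len(token_ids_b) + 1)
    assert len(special_tokens_mask) == len(token_type_ids)
    return special_tokens_mask, token_type_ids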
| 338 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
def constraint_to_multiple_of(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0 , _UpperCAmelCase=None ):
SCREAMING_SNAKE_CASE_: Dict = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE_: Optional[Any] = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE_: Any = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE_: Dict = (output_size, output_size) if isinstance(snake_case__ , snake_case__ ) else output_size
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = get_image_size(snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE_: Tuple = output_height / input_height
SCREAMING_SNAKE_CASE_: int = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE_: str = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE_: int = scale_height
SCREAMING_SNAKE_CASE_: Optional[int] = constraint_to_multiple_of(scale_height * input_height , multiple=snake_case__ )
SCREAMING_SNAKE_CASE_: Dict = constraint_to_multiple_of(scale_width * input_width , multiple=snake_case__ )
return (new_height, new_width)
class __lowercase ( UpperCamelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
def __init__( self : Any , lowerCAmelCase__ : Union[str, Any] = True , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = PILImageResampling.BILINEAR , lowerCAmelCase__ : int = False , lowerCAmelCase__ : str = 1 , lowerCAmelCase__ : List[Any] = True , lowerCAmelCase__ : Any = 1 / 255 , lowerCAmelCase__ : int = True , lowerCAmelCase__ : List[Any] = None , lowerCAmelCase__ : str = None , **lowerCAmelCase__ : List[Any] , ):
super().__init__(**__SCREAMING_SNAKE_CASE)
SCREAMING_SNAKE_CASE_: Union[str, Any] = size if size is not None else {"height": 384, "width": 384}
SCREAMING_SNAKE_CASE_: Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE)
SCREAMING_SNAKE_CASE_: int = do_resize
SCREAMING_SNAKE_CASE_: List[str] = size
SCREAMING_SNAKE_CASE_: Tuple = keep_aspect_ratio
SCREAMING_SNAKE_CASE_: List[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE_: Union[str, Any] = resample
SCREAMING_SNAKE_CASE_: Optional[Any] = do_rescale
SCREAMING_SNAKE_CASE_: Dict = rescale_factor
SCREAMING_SNAKE_CASE_: Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE_: Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_: str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple = False , lowerCAmelCase__ : str = 1 , lowerCAmelCase__ : Dict = PILImageResampling.BICUBIC , lowerCAmelCase__ : int = None , **lowerCAmelCase__ : Optional[Any] , ):
SCREAMING_SNAKE_CASE_: Any = get_size_dict(__SCREAMING_SNAKE_CASE)
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
SCREAMING_SNAKE_CASE_: int = get_resize_output_image_size(
__SCREAMING_SNAKE_CASE , output_size=(size["height"], size["width"]) , keep_aspect_ratio=__SCREAMING_SNAKE_CASE , multiple=__SCREAMING_SNAKE_CASE , )
return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str = None , **lowerCAmelCase__ : List[str] , ):
return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any = None , **lowerCAmelCase__ : str , ):
return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int = None , lowerCAmelCase__ : int = None , lowerCAmelCase__ : Any = None , lowerCAmelCase__ : List[str] = None , lowerCAmelCase__ : Tuple = None , lowerCAmelCase__ : Optional[Any] = None , lowerCAmelCase__ : str = None , lowerCAmelCase__ : Dict = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : str = None , lowerCAmelCase__ : int = None , lowerCAmelCase__ : List[str] = ChannelDimension.FIRST , **lowerCAmelCase__ : List[str] , ):
SCREAMING_SNAKE_CASE_: List[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_: List[Any] = get_size_dict(__SCREAMING_SNAKE_CASE)
SCREAMING_SNAKE_CASE_: Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE_: Optional[Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE_: str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_: Any = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_: Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: Optional[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_: Optional[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_: Tuple = make_list_of_images(__SCREAMING_SNAKE_CASE)
if not valid_images(__SCREAMING_SNAKE_CASE):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: Dict = [to_numpy_array(__SCREAMING_SNAKE_CASE) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: Dict = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_: List[Any] = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_: Tuple = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE) for image in images]
SCREAMING_SNAKE_CASE_: str = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) for image in images]
SCREAMING_SNAKE_CASE_: Optional[Any] = {"pixel_values": images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict = None):
SCREAMING_SNAKE_CASE_: Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__SCREAMING_SNAKE_CASE) != len(__SCREAMING_SNAKE_CASE):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits")
if is_torch_tensor(__SCREAMING_SNAKE_CASE):
SCREAMING_SNAKE_CASE_: int = target_sizes.numpy()
SCREAMING_SNAKE_CASE_: Union[str, Any] = []
for idx in range(len(__SCREAMING_SNAKE_CASE)):
SCREAMING_SNAKE_CASE_: List[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="bilinear" , align_corners=__SCREAMING_SNAKE_CASE)
SCREAMING_SNAKE_CASE_: Dict = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(__SCREAMING_SNAKE_CASE)
else:
SCREAMING_SNAKE_CASE_: Any = logits.argmax(dim=1)
SCREAMING_SNAKE_CASE_: Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
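# --- Illustrative sketch, not part of the original module -------------------
# The same snap-to-multiple arithmetic used by `constraint_to_multiple_of`
# above, with concrete numbers (`math` is already imported in this module):
def _demo_constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple  # nearest multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple  # round down instead
    if x < min_val:
        x = math.ceil(val / multiple) * multiple  # round up instead
    return x

# _demo_constraint_to_multiple_of(390, 32) == 384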
| 13 | import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = (DEISMultistepScheduler,)
UpperCAmelCase_ : int = (("""num_inference_steps""", 25),)
def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->str:
lowerCAmelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase , lowerCAmelCase = sample, sample
for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
if scheduler is None:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
return sample
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase = scheduler.timesteps[5]
lowerCAmelCase = scheduler.timesteps[6]
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase = self.full_loop(scheduler=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='''deis''' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = self.full_loop(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.floataa
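# --- Illustrative sketch, not part of the original module -------------------
# The config interchange the tests above exercise, as plain usage (relies on
# the `diffusers` schedulers imported at the top of this file):
def _demo_scheduler_config_roundtrip():
    deis = DEISMultistepScheduler(num_train_timesteps=1000)
    dpm = DPMSolverMultistepScheduler.from_config(deis.config)  # shares betas etc.
    back = DEISMultistepScheduler.from_config(dpm.config)
    return back.config.num_train_timesteps  # 1000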
| 338 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
A = StableUnCLIPPipeline
A = TEXT_TO_IMAGE_PARAMS
A = TEXT_TO_IMAGE_BATCH_PARAMS
A = TEXT_TO_IMAGE_IMAGE_PARAMS
A = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
A = False
def a_ (self ) -> Any:
__UpperCamelCase : Dict = 3_2
__UpperCamelCase : Dict = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCamelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__UpperCamelCase : Dict = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__SCREAMING_SNAKE_CASE , projection_dim=__SCREAMING_SNAKE_CASE , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__UpperCamelCase : List[Any] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=__SCREAMING_SNAKE_CASE , num_layers=1 , )
torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=__SCREAMING_SNAKE_CASE , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
__UpperCamelCase : str = StableUnCLIPImageNormalizer(embedding_dim=__SCREAMING_SNAKE_CASE )
__UpperCamelCase : Optional[Any] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__UpperCamelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__UpperCamelCase : Dict = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__SCREAMING_SNAKE_CASE , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__UpperCamelCase : Optional[Any] = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__SCREAMING_SNAKE_CASE , layers_per_block=1 , upcast_attention=__SCREAMING_SNAKE_CASE , use_linear_projection=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=__SCREAMING_SNAKE_CASE , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCamelCase : Tuple = AutoencoderKL()
__UpperCamelCase : List[str] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def a_ (self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> Union[str, Any]:
if str(__SCREAMING_SNAKE_CASE ).startswith("mps" ):
__UpperCamelCase : int = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__UpperCamelCase : Tuple = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__UpperCamelCase : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : str = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=__SCREAMING_SNAKE_CASE )
def a_ (self ) -> Optional[Any]:
__UpperCamelCase : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=__SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
'''simple docstring'''
def a_ (self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ (self ) -> Dict:
__UpperCamelCase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
__UpperCamelCase : Dict = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCamelCase : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCamelCase : List[str] = pipe("anime turle" , generator=__SCREAMING_SNAKE_CASE , output_type="np" )
__UpperCamelCase : Any = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def a_ (self ) -> Any:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCamelCase : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
__UpperCamelCase : Optional[int] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCamelCase : Any = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
__UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
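# --- Illustrative sketch, not part of the original module -------------------
# The memory-saving pattern the slow tests rely on, in isolation (needs a
# CUDA device and downloads the checkpoint):
#
#     pipe = StableUnCLIPPipeline.from_pretrained(
#         "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16
#     )
#     pipe.enable_attention_slicing()       # chunk attention to cut peak VRAM
#     pipe.enable_sequential_cpu_offload()  # move weights to GPU only when used
#     image = pipe("anime turtle", num_inference_steps=20).images[0]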
| 298 | import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
torch.manual_seed(0 )
lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = self.dummy_uncond_unet
lowerCAmelCase = KarrasVeScheduler()
lowerCAmelCase = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(num_inference_steps=2 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' , return_dict=__SCREAMING_SNAKE_CASE )[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
lowerCAmelCase = '''google/ncsnpp-celebahq-256'''
lowerCAmelCase = UNetaDModel.from_pretrained(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = KarrasVeScheduler()
lowerCAmelCase = KarrasVePipeline(unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = pipe(num_inference_steps=20 , generator=__SCREAMING_SNAKE_CASE , output_type='''numpy''' ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
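# --- Illustrative sketch, not part of the original module -------------------
# Minimal end-to-end usage of the pipeline under test, written with the
# upstream `diffusers` class names (downloads the pretrained UNet):
#
#     unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#     pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#     image = pipe(num_inference_steps=20, output_type="numpy").images[0]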
| 338 | 0 |
import math
def lowerCAmelCase_ ( __UpperCAmelCase: List[str] ) -> bool:
    """Return True when x = (1 + sqrt(4*k + 1)) / 2 is an exact power of two."""
    UpperCamelCase__ : Union[str, Any] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(snake_case__ )
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[Any] = 1 / 1_2345 ) -> int:
    """Find the least partition candidate for which the proportion of perfect partitions drops below `max_proportion`."""
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : Optional[Any] = 0
UpperCamelCase__ : Union[str, Any] = 3
while True:
UpperCamelCase__ : Optional[int] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(snake_case__ ):
UpperCamelCase__ : Optional[int] = int(snake_case__ )
total_partitions += 1
if check_partition_perfect(snake_case__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(snake_case__ )
integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
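# --- Illustrative sketch, not part of the original module -------------------
# The perfection test checks whether x = (1 + sqrt(4*k + 1)) / 2 is an exact
# power of two, since every candidate satisfies k = x**2 - x. Spot checks,
# written with the standard `math.log2`:
def _demo_is_perfect(k: int) -> bool:
    exponent = math.log2(math.sqrt(4 * k + 1) / 2 + 1 / 2)
    return exponent == int(exponent)

# _demo_is_perfect(2) is True (x = 2), _demo_is_perfect(6) is False (x = 3),
# _demo_is_perfect(12) is True (x = 4).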
| 201 | from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowercase__ : Dict = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase_ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class lowercase_ ( UpperCamelCase_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->np.ndarray:
lowerCAmelCase = self.get_masked_index(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ) ->Dict[str, GenericTensor]:
if return_tensors is None:
lowerCAmelCase = self.framework
lowerCAmelCase = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = self.model(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model_inputs['''input_ids''']
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=None ) ->str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase = target_ids.shape[0]
lowerCAmelCase = model_outputs['''input_ids'''][0]
lowerCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCAmelCase = outputs.numpy()
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
if target_ids is not None:
lowerCAmelCase = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCAmelCase = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0 )
lowerCAmelCase = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCAmelCase = probs[..., target_ids]
lowerCAmelCase , lowerCAmelCase = probs.topk(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = []
lowerCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase = target_ids[p].tolist()
lowerCAmelCase = p
# Filter padding out:
lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(__SCREAMING_SNAKE_CASE )
result.append(__SCREAMING_SNAKE_CASE )
if single_mask:
return result[0]
return result
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) ->Optional[Any]:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowerCAmelCase = [targets]
try:
lowerCAmelCase = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase = {}
lowerCAmelCase = []
for target in targets:
lowerCAmelCase = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if id_ is None:
lowerCAmelCase = self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids''']
if len(__SCREAMING_SNAKE_CASE ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
lowerCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
lowerCAmelCase = list(set(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
lowerCAmelCase = np.array(__SCREAMING_SNAKE_CASE )
return target_ids
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ) ->Dict:
lowerCAmelCase = {}
if targets is not None:
lowerCAmelCase = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = target_ids
if top_k is not None:
lowerCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) == 1:
return outputs[0]
return outputs
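# --- Illustrative sketch, not part of the original module -------------------
# Typical end-to-end use of this pipeline through the `pipeline` factory
# (assumes `transformers` is installed and the checkpoint can be downloaded):
#
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="distilroberta-base")
#     unmasker("Paris is the <mask> of France.", top_k=2)
#     # -> list of dicts with "score", "token", "token_str" and "sequence"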
| 338 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _A ( UpperCamelCase_):
SCREAMING_SNAKE_CASE : Any = """mgp-str"""
def __init__( self , _SCREAMING_SNAKE_CASE=[32, 128] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=27 , _SCREAMING_SNAKE_CASE=38 , _SCREAMING_SNAKE_CASE=5_0257 , _SCREAMING_SNAKE_CASE=3_0522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = image_size
SCREAMING_SNAKE_CASE_ : Dict = patch_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE_ : Dict = max_token_length
SCREAMING_SNAKE_CASE_ : int = num_character_labels
SCREAMING_SNAKE_CASE_ : str = num_bpe_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = num_wordpiece_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE_ : str = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = mlp_ratio
SCREAMING_SNAKE_CASE_ : List[Any] = distilled
SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Optional[int] = drop_rate
SCREAMING_SNAKE_CASE_ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE_ : Dict = attn_drop_rate
SCREAMING_SNAKE_CASE_ : Any = drop_path_rate
SCREAMING_SNAKE_CASE_ : str = output_aa_attentions
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
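# --- Illustrative sketch, not part of the original module -------------------
# Typical construction via the upstream `transformers` name for this class,
# `MgpstrConfig` (the keyword arguments mirror the __init__ above):
#
#     from transformers import MgpstrConfig
#     config = MgpstrConfig(image_size=[32, 128], max_token_length=27)
#     config.hidden_size  # 768 by default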
| 253 | from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase__ : int = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
lowercase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 338 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( UpperCamelCase_ , unittest.TestCase ):
__a : Any = DebertaTokenizer
__a : Optional[int] = True
__a : Tuple = DebertaTokenizerFast
def A ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
UpperCAmelCase = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCAmelCase = {'''unk_token''': '''[UNK]'''}
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def A ( self : str , **lowercase : Union[str, Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(decoded, expected)
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main() -> None:
    message = input('Enter message: ')
    key = input('Enter key [alphanumeric]: ')
    response = input('Encrypt/Decrypt [e/d]: ')

    if response.lower().startswith('e'):
        mode = 'encrypt'
        translated = encrypt_message(key, message)
    elif response.lower().startswith('d'):
        mode = 'decrypt'
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'encrypt')


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, 'decrypt')
def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == 'encrypt':
                num += LETTERS.find(key[key_index])
            elif mode == 'decrypt':
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return ''.join(translated)
if __name__ == "__main__":
main()
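# A quick non-interactive sketch (the sample key/plaintext are made up; this is the classic
# Vigenere example and uses only the functions defined above):
#   translate_message("LEMON", "ATTACKATDAWN", "encrypt")  # -> "LXFOPVEFRNHR"
#   translate_message("LEMON", "LXFOPVEFRNHR", "decrypt")  # -> "ATTACKATDAWN"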
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f'{solution() = }')
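# This is the Project Euler 174 setup: each square lamina uses
# t = outer_width**2 - hole_width**2 tiles, and the answer counts tile totals t <= 1_000_000
# achievable in 1 to 10 distinct ways. A tiny hand check: a 3x3 square with a 1x1 hole uses
# 9 - 1 = 8 tiles.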
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'focalnet'

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act='gelu',
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['stem'] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Optional[Any]:
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
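# Note: these tests appear to rely on the datasets test suite's shared fixtures (e.g. `text_path`,
# assumed here to be a small four-line text file provided by a conftest). A typical local run
# would be:
#   pytest tests/io/test_text.py -q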
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , __SCREAMING_SNAKE_CASE , )
        super().__init__(args=args, **kwargs)


def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
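# Quick examples (a sketch using the converter defined above):
#   decimal_to_binary(0)   # -> "0b0"
#   decimal_to_binary(8)   # -> "0b1000"
#   decimal_to_binary(-8)  # -> "-0b1000"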
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)


class Things:
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = name
lowerCAmelCase = value
lowerCAmelCase = weight
def __repr__( self ) ->str:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
return self.value
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
return self.name
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
return self.weight
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
return self.value / self.weight
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int:
lowerCAmelCase = []
for i in range(len(snake_case__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
lowerCAmelCase = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ )
lowerCAmelCase = []
lowerCAmelCase , lowerCAmelCase = 0.0, 0.0
for i in range(len(snake_case__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
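# A minimal usage sketch (the sample menu below is made up, not part of the module):
#   foods = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 10, 20])
#   taken, total_value = greedy(foods, 60, Things.get_value)
#   # greedily takes the highest-value items whose combined weight stays within the 60-unit budget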
# Imports
import numpy as np
class IndexCalculation:
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Any="" ,__snake_case :str=None ,__snake_case :int=None ,__snake_case :List[Any]=None ,__snake_case :Any=None ,__snake_case :Union[str, Any]=None ) -> Dict:
self.set_matricies(red=__SCREAMING_SNAKE_CASE ,green=__SCREAMING_SNAKE_CASE ,blue=__SCREAMING_SNAKE_CASE ,red_edge=__SCREAMING_SNAKE_CASE ,nir=__SCREAMING_SNAKE_CASE )
a__ = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
    def arvaa(self):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
    def cvi(self):
return self.nir * (self.red / (self.green**2))
    def gli(self):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
    def ndvi(self):
return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
    def grndvi(self):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
    def rbndvi(self):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
    def atsavi(self, x=0.08, a=1.22, b=0.03):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
    def bwdrvi(self):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
return (self.nir / self.green) - 1
    def ci_rededge(self):
return (self.nir / self.redEdge) - 1
    def ci(self):
return (self.red - self.blue) / self.red
    def ctvi(self):
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi(self):
return self.nir - self.green
    def evi(self):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
    def gosavi(self, y=0.16):
return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi(self, a=None, b=None):
return (self.nir - b) / (a * self.red)
    def ipvi(self):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
return self.nir / self.red
    def mrvi(self):
return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
    def norm_g(self):
return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
return (self.green - self.red) / (self.green + self.red)
    def ri(self):
return (self.red - self.green) / (self.red + self.green)
    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
    def _if(self):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
return self.nir / self.red
    def tvi(self):
return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
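# A minimal usage sketch (the toy arrays below are assumed; any equal-shaped numpy arrays work):
#   nir = np.array([[0.8, 0.7], [0.9, 0.6]])
#   red = np.array([[0.2, 0.3], [0.1, 0.4]])
#   cl = IndexCalculation(red=red, nir=nir)
#   cl.calculation("NDVI", red=red, nir=nir)  # elementwise (nir - red) / (nir + red)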
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
import math
def prime_sieve(n):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
def solution(limit=999_966_663_333):
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }
    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="speaker_embeddings_path.json" , __SCREAMING_SNAKE_CASE="speaker_embeddings" , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ) ->int:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''v2''' ) , exist_ok=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
lowerCAmelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCAmelCase = self._load_voice_preset(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , __SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE , F"{prompt_key}_{key}.npy" )
lowerCAmelCase = tmp_dict
with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , '''w''' ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
super().save_pretrained(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def _load_voice_preset(self, voice_preset=None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None, **kwargs):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
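# A usage sketch (mirrors the documented Bark flow; the checkpoint and preset names are illustrative):
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hey, it's a test!", voice_preset="v2/en_speaker_6")
#   # `inputs` then carries tokenized text plus a "history_prompt" BatchFeature for the voice preset.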
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        fa += metric_max_over_ground_truths(fa_score, prediction, ground_truths)

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(f"F1: {fa:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retried while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def __lowerCAmelCase ( snake_case__ ):
__UpperCamelCase : Optional[Any] = {}
if args.model_type is None:
__UpperCamelCase : Any = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
__UpperCamelCase : List[Any] = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
__UpperCamelCase : Tuple = args.n_docs
if args.index_name is not None:
__UpperCamelCase : List[str] = args.index_name
if args.index_path is not None:
__UpperCamelCase : str = args.index_path
else:
__UpperCamelCase : str = BartForConditionalGeneration
__UpperCamelCase : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , snake_case__ )
__UpperCamelCase : List[Any] = get_scores if args.eval_mode == "e2e" else get_precision_at_k
__UpperCamelCase : Union[str, Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
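    # "e2e" scores the generated answers (exact match / F1) against the gold
    # answers, while "retrieval" scores the retrieved documents with precision@k.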
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(snake_case__ , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(snake_case__ ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
__UpperCamelCase : int = RagRetriever.from_pretrained(snake_case__ , **snake_case__ )
__UpperCamelCase : List[str] = model_class.from_pretrained(snake_case__ , retriever=snake_case__ , **snake_case__ )
model.retriever.init_retrieval()
else:
__UpperCamelCase : int = model_class.from_pretrained(snake_case__ , **snake_case__ )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
__UpperCamelCase : int = []
for line in tqdm(snake_case__ ):
questions.append(line.strip() )
if len(snake_case__ ) == args.eval_batch_size:
__UpperCamelCase : List[str] = evaluate_batch_fn(snake_case__ , snake_case__ , snake_case__ )
preds_file.write("\n".join(snake_case__ ) + "\n" )
preds_file.flush()
__UpperCamelCase : int = []
if len(snake_case__ ) > 0:
__UpperCamelCase : Optional[Any] = evaluate_batch_fn(snake_case__ , snake_case__ , snake_case__ )
preds_file.write("\n".join(snake_case__ ) )
preds_file.flush()
score_fn(snake_case__ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_lowerCAmelCase = get_args()
main(args)
| 298 | import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 338 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def A ( _UpperCAmelCase : str = "isbn/0140328726" ) -> dict:
'''simple docstring'''
_UpperCAmelCase = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
_UpperCAmelCase = F"{olid} is not a valid Open Library olid"
raise ValueError(_UpperCAmelCase )
return requests.get(F"https://openlibrary.org/{new_olid}.json" ).json()
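# Illustrative use (requires network access; result current as of writing):
# get_openlibrary_data("isbn/0140328726")["title"]  # -> 'Matilda'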
def A ( _UpperCAmelCase : dict ) -> dict:
'''simple docstring'''
_UpperCAmelCase = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
_UpperCAmelCase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_UpperCAmelCase = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
_UpperCAmelCase = data['First sentence']['value']
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = ', '.join(_UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
UpperCAmelCase__ = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(f"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
UpperCAmelCase__ = summarize_book(get_openlibrary_data(f"""isbn/{isbn}"""))
print("\n".join(f"""{key}: {value}""" for key, value in book_summary.items()))
    except JSONDecodeError: # Workaround for requests.exceptions.RequestException
print(f"""Sorry, there are no results for ISBN: {isbn}.""")
| 339 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def _lowerCamelCase ( self : str) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def _lowerCamelCase ( self : Union[str, Any] , A : List[List[List[str]]] , A : List[List[str]] , A : int = 1 , A : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=A , hypotheses=A , min_len=A , max_len=A)
}
| 339 | 1 |
from __future__ import annotations
UpperCAmelCase__ = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase__ = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase__ = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
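# Note on the box check above: row - row % 3 and column - column % 3 give the
# top-left corner of the 3x3 box, e.g. (row=5, column=7) scans the box at (3, 6).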
def A ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def A ( _UpperCAmelCase : Matrix ) -> Matrix | None:
'''simple docstring'''
if location := find_empty_location(_UpperCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = digit
if sudoku(_UpperCAmelCase ) is not None:
return grid
_UpperCAmelCase = 0
return None
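# The solver backtracks by resetting the cell to 0 before returning None, so a
# failed branch leaves the grid unchanged; worst-case cost is exponential in
# the number of empty cells.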
def A ( _UpperCAmelCase : Matrix ) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
print(_UpperCAmelCase , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
UpperCAmelCase__ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 339 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
UpperCAmelCase__ = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
UpperCAmelCase__ = TaTokenizerFast
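# The tokenizer aliases above are resolved eagerly because MT5 reuses the T5
# tokenizer classes; everything else is registered lazily via _LazyModule below.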
UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
UpperCAmelCase__ = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 339 | 1 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __lowerCAmelCase :
def __init__( self : Optional[int] , A : int , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = 13
_UpperCAmelCase = 7
_UpperCAmelCase = 30
_UpperCAmelCase = self.seq_length + self.mem_len
_UpperCAmelCase = 15
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 99
_UpperCAmelCase = [10, 50, 80]
_UpperCAmelCase = 32
_UpperCAmelCase = 32
_UpperCAmelCase = 4
_UpperCAmelCase = 8
_UpperCAmelCase = 1_28
_UpperCAmelCase = 2
_UpperCAmelCase = 2
_UpperCAmelCase = None
_UpperCAmelCase = 1
_UpperCAmelCase = 0
_UpperCAmelCase = 3
_UpperCAmelCase = self.vocab_size - 1
_UpperCAmelCase = 0.0_1
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _lowerCamelCase ( self : List[str]) -> str:
"""simple docstring"""
random.seed(self.seed)
tf.random.set_seed(self.seed)
def _lowerCamelCase ( self : Union[str, Any] , A : str , A : Optional[int] , A : int , A : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = TFTransfoXLModel(A)
_UpperCAmelCase , _UpperCAmelCase = model(A).to_tuple()
_UpperCAmelCase = {'input_ids': input_ids_a, 'mems': mems_a}
_UpperCAmelCase , _UpperCAmelCase = model(A).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _lowerCamelCase ( self : Tuple , A : Optional[int] , A : Dict , A : List[Any] , A : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = TFTransfoXLLMHeadModel(A)
_UpperCAmelCase , _UpperCAmelCase = model(A).to_tuple()
_UpperCAmelCase = {'input_ids': input_ids_a, 'labels': lm_labels}
_UpperCAmelCase , _UpperCAmelCase = model(A).to_tuple()
_UpperCAmelCase , _UpperCAmelCase = model([input_ids_a, mems_a]).to_tuple()
_UpperCAmelCase = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
_UpperCAmelCase , _UpperCAmelCase = model(A).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _lowerCamelCase ( self : Union[str, Any] , A : Union[str, Any] , A : int , A : int , A : int) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFTransfoXLForSequenceClassification(A)
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids_a}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
UpperCamelCase = () if is_tf_available() else ()
UpperCamelCase = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : int , A : Optional[int] , A : List[str] , A : Any , A : str , A : Tuple) -> Tuple:
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _lowerCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = TFTransfoXLModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=A , d_embed=37)
def _lowerCamelCase ( self : List[Any]) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
self.model_tester.set_seed()
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A)
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
self.model_tester.set_seed()
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A)
def _lowerCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A)
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
if model_class in list_other_models_with_output_ebd:
_UpperCAmelCase = model.get_output_embeddings()
assert isinstance(A , tf.keras.layers.Layer)
_UpperCAmelCase = model.get_bias()
assert name is None
else:
_UpperCAmelCase = model.get_output_embeddings()
assert x is None
_UpperCAmelCase = model.get_bias()
assert name is None
def _lowerCamelCase ( self : int) -> Any:
"""simple docstring"""
pass
@slow
def _lowerCamelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFTransfoXLModel.from_pretrained(A)
self.assertIsNotNone(A)
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.')
def _lowerCamelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
pass
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@unittest.skip('Skip test until #12651 is resolved.')
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
# fmt: off
_UpperCAmelCase = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_UpperCAmelCase = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_UpperCAmelCase = model.generate(A , max_length=2_00 , do_sample=A)
self.assertListEqual(output_ids[0].numpy().tolist() , A)
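        # With sampling disabled (do_sample=False in the original test), generation
        # is greedy and therefore deterministic, so the continuation can be compared
        # token-by-token against the expected ids above.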
| 339 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''open-llama'''
def __init__( self : str , A : List[Any]=10_00_00 , A : Tuple=40_96 , A : Tuple=1_10_08 , A : List[str]=32 , A : Tuple=32 , A : Optional[Any]="silu" , A : int=20_48 , A : Optional[Any]=0.0_2 , A : Dict=1E-6 , A : Optional[Any]=True , A : List[Any]=0 , A : Dict=1 , A : int=2 , A : Dict=False , A : Optional[int]=True , A : List[Any]=0.1 , A : str=0.1 , A : Dict=True , A : Optional[Any]=True , A : Dict=None , **A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = rms_norm_eps
_UpperCAmelCase = use_cache
_UpperCAmelCase = kwargs.pop(
'use_memorry_efficient_attention' , A)
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_dropout_prob
_UpperCAmelCase = use_stable_embedding
_UpperCAmelCase = shared_input_output_embedding
_UpperCAmelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , A) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"got {self.rope_scaling}")
_UpperCAmelCase = self.rope_scaling.get('type' , A)
_UpperCAmelCase = self.rope_scaling.get('factor' , A)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(A , A) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 339 | 1 |
from __future__ import annotations
def A ( _UpperCAmelCase : dict , _UpperCAmelCase : str ) -> set[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = set(_UpperCAmelCase ), [start]
while stack:
_UpperCAmelCase = stack.pop()
explored.add(_UpperCAmelCase )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(_UpperCAmelCase )
return explored
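# Pushing neighbours in reverse keeps the earliest-listed neighbour on top of
# the stack; with the graph below and start "A", the first-visit order is
# A, B, D, E, F, C, G.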
UpperCAmelCase__ = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 339 |
def A ( _UpperCAmelCase : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def A ( _UpperCAmelCase : str ) -> bool:
'''simple docstring'''
_UpperCAmelCase = credit_card_number
_UpperCAmelCase = 0
_UpperCAmelCase = len(_UpperCAmelCase ) - 2
for i in range(_UpperCAmelCase , -1 , -2 ):
# double the value of every second digit
_UpperCAmelCase = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
_UpperCAmelCase = cc_number[:i] + str(_UpperCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
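# Worked example: for "4111111111111111", doubling every second digit from the
# right turns the leading 4 into 8 and seven of the 1s into 2s; the digit sum
# is 8 + 7 * 2 + 8 * 1 = 30, and 30 % 10 == 0, so the Luhn check passes.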
def A ( _UpperCAmelCase : str ) -> bool:
'''simple docstring'''
_UpperCAmelCase = F"{credit_card_number} is an invalid credit card number because"
if not credit_card_number.isdigit():
print(F"{error_message} it has nonnumerical characters." )
return False
if not 13 <= len(_UpperCAmelCase ) <= 16:
print(F"{error_message} of its length." )
return False
if not validate_initial_digits(_UpperCAmelCase ):
print(F"{error_message} of its first two digits." )
return False
if not luhn_validation(_UpperCAmelCase ):
print(F"{error_message} it fails the Luhn check." )
return False
print(F"{credit_card_number} is a valid credit card number." )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 339 | 1 |
def A ( _UpperCAmelCase : dict ) -> set:
'''simple docstring'''
_UpperCAmelCase = set()
    # edges = set of the graph's edges
    _UpperCAmelCase = get_edges(_UpperCAmelCase )
    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove every edge incident to from_node or to_node
while edges:
_UpperCAmelCase , _UpperCAmelCase = edges.pop()
chosen_vertices.add(_UpperCAmelCase )
chosen_vertices.add(_UpperCAmelCase )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(_UpperCAmelCase )
return chosen_vertices
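# This is the classic maximal-matching heuristic: taking both endpoints of each
# matched edge yields a vertex cover at most twice the size of an optimal one.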
def A ( _UpperCAmelCase : dict ) -> set:
'''simple docstring'''
_UpperCAmelCase = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 339 |
from functools import reduce
UpperCAmelCase__ = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def A ( _UpperCAmelCase : str = N ) -> int:
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _UpperCAmelCase , _UpperCAmelCase : str(int(_UpperCAmelCase ) * int(_UpperCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(_UpperCAmelCase ) - 12 ) )
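# A more explicit, equivalent sketch of the 13-digit window product (assumes
# Python 3.8+ for math.prod; the helper name is illustrative, not part of the
# original solution):
# from math import prod
# def window_product(digits: str, i: int, width: int = 13) -> int:
#     return prod(int(ch) for ch in digits[i : i + width])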
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __lowerCAmelCase ( A ):
UpperCamelCase = ''''''
UpperCamelCase = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self : str , A : Optional[DatasetInfo] = None , A : Optional[str] = None , **A : str , ) -> Optional[int]:
"""simple docstring"""
super().__init__(self , **A)
_UpperCAmelCase = repo_info
_UpperCAmelCase = token
_UpperCAmelCase = None
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
if self.dir_cache is None:
_UpperCAmelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_UpperCAmelCase = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(A): {'name': str(A), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
})
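    # dir_cache maps each file path to a "file" entry and every ancestor
    # directory (except the repo root) to a "directory" entry, so listings can
    # be served without further Hub calls.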
def _lowerCamelCase ( self : Union[str, Any] , A : str , A : str = "rb" , **A : int , ) -> int:
"""simple docstring"""
if not isinstance(self.repo_info , A):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}")
_UpperCAmelCase = hf_hub_url(self.repo_info.id , A , revision=self.repo_info.sha)
return fsspec.open(
A , mode=A , headers=get_authentication_headers_for_url(A , use_auth_token=self.token) , client_kwargs={'trust_env': True} , ).open()
def _lowerCamelCase ( self : Union[str, Any] , A : int , **A : Optional[int]) -> Any:
"""simple docstring"""
self._get_dirs()
_UpperCAmelCase = self._strip_protocol(A)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A)
def _lowerCamelCase ( self : List[Any] , A : str , A : Any=False , **A : int) -> Optional[int]:
"""simple docstring"""
self._get_dirs()
_UpperCAmelCase = PurePosixPath(path.strip('/'))
_UpperCAmelCase = {}
for p, f in self.dir_cache.items():
_UpperCAmelCase = PurePosixPath(p.strip('/'))
_UpperCAmelCase = p.parent
if root == path:
_UpperCAmelCase = f
_UpperCAmelCase = list(paths.values())
if detail:
return out
else:
return sorted(f['name'] for f in out)
| 339 |
from __future__ import annotations
from collections.abc import Callable
UpperCAmelCase__ = list[list[float | int]]
def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : Matrix ) -> Matrix:
'''simple docstring'''
_UpperCAmelCase = len(_UpperCAmelCase )
_UpperCAmelCase = [[0 for _ in range(size + 1 )] for _ in range(_UpperCAmelCase )]
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for row in range(_UpperCAmelCase ):
for col in range(_UpperCAmelCase ):
_UpperCAmelCase = matrix[row][col]
_UpperCAmelCase = vector[row][0]
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while row < size and col < size:
# pivoting
_UpperCAmelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_UpperCAmelCase , _UpperCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_UpperCAmelCase , _UpperCAmelCase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _UpperCAmelCase ):
_UpperCAmelCase = augmented[rowa][col] / augmented[row][col]
_UpperCAmelCase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _UpperCAmelCase ):
for row in range(_UpperCAmelCase ):
_UpperCAmelCase = augmented[row][col] / augmented[col][col]
for cola in range(_UpperCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_UpperCAmelCase )
]
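# Quick sanity check (illustrative): solve([[2, 0], [0, 1]], [[4], [3]])
# returns [[2.0], [3.0]], i.e. the solution of 2x = 4, y = 3.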
def A ( _UpperCAmelCase : list[int] ) -> Callable[[int], int]:
'''simple docstring'''
_UpperCAmelCase = len(_UpperCAmelCase )
_UpperCAmelCase = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )]
_UpperCAmelCase = [[0] for _ in range(_UpperCAmelCase )]
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for x_val, y_val in enumerate(_UpperCAmelCase ):
for col in range(_UpperCAmelCase ):
_UpperCAmelCase = (x_val + 1) ** (size - col - 1)
_UpperCAmelCase = y_val
_UpperCAmelCase = solve(_UpperCAmelCase , _UpperCAmelCase )
def interpolated_func(_UpperCAmelCase : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_UpperCAmelCase ) )
return interpolated_func
def A ( _UpperCAmelCase : int ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def A ( _UpperCAmelCase : Callable[[int], int] = question_function , _UpperCAmelCase : int = 10 ) -> int:
'''simple docstring'''
_UpperCAmelCase = [func(_UpperCAmelCase ) for x_val in range(1 , order + 1 )]
_UpperCAmelCase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_UpperCAmelCase = 0
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for poly in polynomials:
_UpperCAmelCase = 1
while func(_UpperCAmelCase ) == poly(_UpperCAmelCase ):
x_val += 1
ret += poly(_UpperCAmelCase )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small')
_UpperCAmelCase = AutoTokenizer.from_pretrained('google/mt5-small')
_UpperCAmelCase = tokenizer('Hello there' , return_tensors='np').input_ids
_UpperCAmelCase = tokenizer('Hi I am' , return_tensors='np').input_ids
_UpperCAmelCase = shift_tokens_right(A , model.config.pad_token_id , model.config.decoder_start_token_id)
_UpperCAmelCase = model(A , decoder_input_ids=A).logits
_UpperCAmelCase = optax.softmax_cross_entropy(A , onehot(A , logits.shape[-1])).mean()
_UpperCAmelCase = -(labels.shape[-1] * loss.item())
_UpperCAmelCase = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
| 339 |
from __future__ import annotations
def A ( _UpperCAmelCase : list[int] ) -> bool:
'''simple docstring'''
return len(set(_UpperCAmelCase ) ) == len(_UpperCAmelCase )
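# set() keeps one copy of each value, so the lengths differ exactly when the
# list holds a repeated element; this runs in O(n) time and O(n) extra space.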
if __name__ == "__main__":
import doctest
doctest.testmod()
| 339 | 1 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = RoCBertTokenizer
UpperCamelCase = None
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = filter_non_english
def _lowerCamelCase ( self : Any) -> Any:
"""simple docstring"""
super().setUp()
_UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
_UpperCAmelCase = {}
_UpperCAmelCase = {}
for i, value in enumerate(A):
_UpperCAmelCase = i
_UpperCAmelCase = i
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'])
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
with open(self.word_shape_file , 'w' , encoding='utf-8') as word_shape_writer:
json.dump(A , A , ensure_ascii=A)
with open(self.word_pronunciation_file , 'w' , encoding='utf-8') as word_pronunciation_writer:
json.dump(A , A , ensure_ascii=A)
def _lowerCamelCase ( self : Any) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
_UpperCAmelCase = tokenizer.tokenize('你好[SEP]你是谁')
self.assertListEqual(A , ['你', '好', '[SEP]', '你', '是', '谁'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(A) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A) , [5, 6, 2, 5, 7, 8])
def _lowerCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz') , ['ah', '\u535A', '\u63A8', 'zz'])
def _lowerCamelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A)
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ') , ['hello', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def _lowerCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hällo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['h\u00E9llo'])
def _lowerCamelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def _lowerCamelCase ( self : Tuple) -> Any:
"""simple docstring"""
_UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def _lowerCamelCase ( self : Dict) -> str:
"""simple docstring"""
_UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A)
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
def _lowerCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])
def _lowerCamelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])
def _lowerCamelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A , never_split=['[UNK]'])
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
def _lowerCamelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_UpperCAmelCase = {}
for i, token in enumerate(A):
_UpperCAmelCase = i
_UpperCAmelCase = RoCBertWordpieceTokenizer(vocab=A , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('unwanted running') , ['un', '##want', '##ed', 'runn', '##ing'])
self.assertListEqual(tokenizer.tokenize('unwantedX running') , ['[UNK]', 'runn', '##ing'])
def _lowerCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(' '))
self.assertTrue(_is_whitespace('\t'))
self.assertTrue(_is_whitespace('\r'))
self.assertTrue(_is_whitespace('\n'))
self.assertTrue(_is_whitespace('\u00A0'))
self.assertFalse(_is_whitespace('A'))
self.assertFalse(_is_whitespace('-'))
def _lowerCamelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
self.assertTrue(_is_control('\u0005'))
self.assertFalse(_is_control('A'))
self.assertFalse(_is_control(' '))
self.assertFalse(_is_control('\t'))
self.assertFalse(_is_control('\r'))
def _lowerCamelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation('-'))
self.assertTrue(_is_punctuation('$'))
self.assertTrue(_is_punctuation('`'))
self.assertTrue(_is_punctuation('.'))
self.assertFalse(_is_punctuation('A'))
self.assertFalse(_is_punctuation(' '))
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']])
if self.test_rust_tokenizer:
_UpperCAmelCase = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']])
def _lowerCamelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_UpperCAmelCase = tokenizer_r.encode_plus(
A , return_attention_mask=A , return_token_type_ids=A , return_offsets_mapping=A , add_special_tokens=A , )
_UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(A , 'do_lower_case') else False
_UpperCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'])
def _lowerCamelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = ['的', '人', '有']
_UpperCAmelCase = ''.join(A)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = tokenizer_p.encode(A , add_special_tokens=A)
_UpperCAmelCase = tokenizer_r.encode(A , add_special_tokens=A)
_UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(A)
_UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(A)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A , A)
self.assertListEqual(A , A)
_UpperCAmelCase = False
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = self.tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = tokenizer_r.encode(A , add_special_tokens=A)
_UpperCAmelCase = tokenizer_p.encode(A , add_special_tokens=A)
_UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(A)
_UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(A)
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCAmelCase = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(A)
]
self.assertListEqual(A , A)
self.assertListEqual(A , A)
@slow
def _lowerCamelCase ( self : int) -> str:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
_UpperCAmelCase = tokenizer.encode('你好' , add_special_tokens=A)
_UpperCAmelCase = tokenizer.encode('你是谁' , add_special_tokens=A)
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A)
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A , A)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
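        # Layout under test: [CLS] text [SEP] and [CLS] text [SEP] text_2 [SEP];
        # in the toy vocab above [CLS] has id 1 and [SEP] has id 2.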
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '你好,你是谁'
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_shape_ids(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(A)
_UpperCAmelCase = tokenizer.prepare_for_model(
A , A , A , add_special_tokens=A)
_UpperCAmelCase = tokenizer.encode_plus(A , add_special_tokens=A)
self.assertEqual(A , A)
| 339 |
import os
UpperCAmelCase__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def A ( _UpperCAmelCase : str ) -> int:
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while index < len(_UpperCAmelCase ) - 1:
_UpperCAmelCase = SYMBOLS[numerals[index]]
_UpperCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
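# Example: "MCMXC" parses as 1000 - 100 + 1000 - 10 + 100 = 1990, since each
# symbol smaller than its successor is subtracted rather than added.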
def A ( _UpperCAmelCase : int ) -> str:
'''simple docstring'''
_UpperCAmelCase = ''
_UpperCAmelCase = num // 1_000
numerals += m_count * "M"
num %= 1_000
_UpperCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_UpperCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
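# Example: 49 yields "XLIX": x_count == 4 emits "XL" and the remaining 9
# emits "IX".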
def A ( _UpperCAmelCase : str = "/p089_roman.txt" ) -> int:
'''simple docstring'''
_UpperCAmelCase = 0
with open(os.path.dirname(_UpperCAmelCase ) + roman_numerals_filename ) as filea:
_UpperCAmelCase = filea.readlines()
for line in lines:
_UpperCAmelCase = line.strip()
_UpperCAmelCase = parse_roman_numerals(_UpperCAmelCase )
_UpperCAmelCase = generate_roman_numerals(_UpperCAmelCase )
savings += len(_UpperCAmelCase ) - len(_UpperCAmelCase )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 1 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : int , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : int = 8 , **A : int , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_pad
_UpperCAmelCase = pad_size
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str]) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : int , A : Optional[Union[str, ChannelDimension]] = None) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = get_image_size(A)
_UpperCAmelCase = (old_height // size + 1) * size - old_height
_UpperCAmelCase = (old_width // size + 1) * size - old_width
return pad(A , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=A)
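    # Note: old // size + 1 always rounds past the current dimension, so an
    # image whose side is already a multiple of `size` still gains one full
    # extra block of symmetric padding.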
def _lowerCamelCase ( self : Optional[int] , A : ImageInput , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[int] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : List[Any] , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_pad if do_pad is not None else self.do_pad
_UpperCAmelCase = pad_size if pad_size is not None else self.pad_size
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_pad:
_UpperCAmelCase = [self.pad(A , size=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
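# Usage sketch (illustrative values): the processor pads H and W up to the next
# multiple of `pad_size`, so a 250x250 RGB input comes back as 1x3x256x256.
#   processor = Swin2SRImageProcessor(pad_size=8)
#   inputs = processor(images=image, return_tensors="pt")  # image: PIL.Image or np.ndarray
#   inputs["pixel_values"].shape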
| 339 |
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the "Cited by N" text for a paper looked up on Google Scholar."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
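# Note: at the time of writing, the third anchor in the "gs_fl" action row holds
# the "Cited by N" link text, hence anchors[2]. This is brittle scraping and will
# break if Google Scholar changes its markup.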
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 339 | 1 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url: str) -> Swin2SRConfig:
    """Build a Swin2SRConfig matching the variant encoded in the checkpoint URL."""
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""
return config
def rename_key(name: str, config: Swin2SRConfig) -> str:
    """Map an original Swin2SR state-dict key onto its transformers equivalent."""
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name
    return name
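# Example mapping produced by rename_key:
#   "layers.0.residual_group.blocks.1.norm1.weight"
#   -> "swin2sr.encoder.stages.0.layers.1.layernorm_before.weight"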
def convert_state_dict(orig_state_dict: dict, config: Swin2SRConfig) -> dict:
    """Rename all keys and split the fused qkv matrices into query/key/value."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val
    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url: str, pytorch_dump_folder_path: str, push_to_hub: bool) -> None:
    """Download an original Swin2SR checkpoint, convert it, verify outputs, and optionally save/push."""
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"Unexpected key {key} in state_dict" )
# verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)
    outputs = model(pixel_values)
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_UpperCAmelCase = torch.Size([1, 3, 512, 512] )
_UpperCAmelCase = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_UpperCAmelCase = torch.Size([1, 3, 1_024, 1_024] )
_UpperCAmelCase = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_UpperCAmelCase = torch.Size([1, 3, 1_024, 1_024] )
_UpperCAmelCase = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_UpperCAmelCase = torch.Size([1, 3, 512, 512] )
_UpperCAmelCase = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_UpperCAmelCase = torch.Size([1, 3, 1_024, 1_024] )
_UpperCAmelCase = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _UpperCAmelCase , atol=1E-3 )
print('Looks ok!' )
    url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
    model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
model.push_to_hub(F"caidas/{model_name}" )
processor.push_to_hub(F"caidas/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
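# Example invocation (script name and paths are illustrative):
#   python convert_swin2sr_original_to_pytorch.py \
#     --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#     --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64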
| 339 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 339 | 1 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place using the (deliberately inefficient) slowsort algorithm."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
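# Example (sorts in place):
#   data = [3, 1, 2]; slowsort(data)  ->  data == [1, 2, 3]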
if __name__ == "__main__":
from doctest import testmod
testmod()
| 339 |
cache = {}
def _calculate(days: int, absent: int, late: int) -> int:
    """Memoised count of valid prize strings of length ``days`` given the current absent/late state."""
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
def solution(days: int = 30) -> int:
    """Return the number of prize strings over a 30-day period (by default)."""
    return _calculate(days, absent=0, late=0)
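# Sanity check from the problem statement: over a 4-day period there are
# exactly 43 prize strings, i.e. solution(4) == 43.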
if __name__ == "__main__":
print(solution())
| 339 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    """Count the parameters that require gradients."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
return params
UpperCAmelCase__ = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint callback that saves the single best checkpoint for ``metric``."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function.")
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1)
return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback on ``val_{metric}``; loss-like metrics are minimised, others maximised."""
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True)
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
@rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
@rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
@rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 339 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align PATH_TO_DIFFUSERS in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'])
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'])
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'])
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'])
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'])
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 339 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase__ = logging.getLogger()
def get_setup_file() -> str:
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(train_args)
        eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(eval_args)
        entropy_eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(entropy_eval_args)
| 339 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
UpperCAmelCase__ = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")
        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True)
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.")
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Parse the flattened "#"-delimited table text (rows separated by newlines) into a pandas DataFrame."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd
        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset")
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator)
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
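# Typical invocation (arguments illustrative):
#   python run_tabfact_with_tapex.py \
#     --model_name_or_path microsoft/tapex-base \
#     --do_train --do_eval \
#     --output_dir ./tapex-tabfact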
| 339 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dict(self):
        # NB: this helper's name is a best-effort guess for the obfuscated original; it is not referenced elsewhere.
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)
    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])
    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)
    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns
    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))
    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
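# Dataset.from_list infers the schema from the first record, which is why the
# uneven-records test above sees {"col_1": None} for the second row.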
| 339 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run ``check_program`` in a separate, guarded process and report whether it passed."""
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()
    if not result:
        result.append("timed out")
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    """Execute the program with destructive OS hooks disabled; append the outcome to ``result``."""
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")
    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass
class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""
    def read(self, *args, **kwargs):
        raise OSError
    def readline(self, *args, **kwargs):
        raise OSError
    def readlines(self, *args, **kwargs):
        raise OSError
    def readable(self, *args, **kwargs):
        """Returns False: this IO object cannot be read."""
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive or resource-hungry functionality (file removal, subprocesses,
    unbounded memory, ...) before executing untrusted code. This is a mitigation, not a
    security sandbox. Attribute targets below are reconstructed from the OpenAI original."""
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None
    import sys
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
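# Usage sketch (hypothetical program string):
#   out = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="demo/0", completion_id=0)
#   out["passed"]  # -> True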
| 339 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32)  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32)
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 339 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """Read a boolean flag from the environment, falling back to ``default``."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
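# e.g. running `RUN_SLOW=yes python -m pytest tests/` enables the @slow-marked tests below.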
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)
def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)
def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)
def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)
def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)
def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)
def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)
def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available, "test requires at least one tracker to be available and for `comet_ml` to not be installed")(test_case)
class TempDirTestCase(unittest.TestCase):
    """A TestCase sharing a class-level temporary directory, emptied between tests."""
    clear_on_setup = True
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)
    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """A TestCase that resets the AcceleratorState singleton after each test."""
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """A TestCase whose `add_mocks` starts mocks now and stops them on teardown."""
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Check that `tensor` holds the same value on every distributed process."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
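# Usage sketch (added; assumes a launched multi-process run with an initialized
# AcceleratorState):
#   tensor = torch.arange(4, device=AcceleratorState().device)
#   assert are_the_same_tensors(tensor), "tensor differs across processes"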
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """Run `command` with subprocess.check_output; optionally return decoded stdout."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
| 339 | 1 |
def combination_util(arr, n, r, index, data, i):
    """Print all size-r combinations of arr; data[0:index] holds the current partial pick."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
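    # Cross-check (added, not in the original snippet): the standard library
    # produces the same 3-element subsets, which makes combination_util easy to verify.
    from itertools import combinations
    for combo in combinations(arr, 3):
        print(*combo)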
| 339 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if `n` can go at (row, column) without clashing in its row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the next empty cell (marked 0), scanning row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return the solved grid or None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 339 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase__ = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 339 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }), codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"], reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ], )
    def _download_and_prepare(self, dl_manager):
        import nltk
        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 339 | 1 |
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"
    def __init__(
        self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
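# Usage sketch (added for illustration): any field can be overridden at init time,
# e.g. BertGenerationConfig(num_hidden_layers=12, use_cache=False).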
| 339 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
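# Note (added): the weight matrix is replaced wholesale with the embedding table,
# so the layer maps hidden states back to vocabulary logits (weight tying).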
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default download root is an assumption; the caller below passes only the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions["n_mels"], d_model=dimensions["n_audio_state"], max_target_positions=dimensions["n_text_ctx"], encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_head"], max_source_positions=dimensions["n_audio_ctx"], )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 339 | 1 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Primality test using the 6k +/- 1 optimization."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
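# Why 6k +/- 1 (explanatory note, added): every integer is 6k + r for some
# r in {0, 1, 2, 3, 4, 5}; r in {0, 2, 4} gives a multiple of 2 and r == 3 a
# multiple of 3, so once 2 and 3 are ruled out, only 6k - 1 and 6k + 1 remain.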
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.", )
        self.assertFalse(
            is_prime(1), "One only has 1 positive factor, primes must have exactly two.", )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 339 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 339 | 1 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return `number`'s multiplication table up to `number_of_terms` terms, one line each."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1))
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
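    # Expected start of the output above (added): "5 * 1 = 5", then "5 * 2 = 10", ...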
| 339 |
import sys
from collections import defaultdict
class Heap:
    """Min-heap over edge weights with a position index, used by Prim's algorithm."""
    def __init__(self):
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Prim's MST: grow a tree from vertex 0, always taking the cheapest crossing edge."""
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
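    # Illustrative session (added; values chosen for this note): with 4 edges
    # "0 1 1", "0 2 3", "1 2 1", "2 3 4", the printed MST edge list is
    # [(0, 1), (1, 2), (2, 3)].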
| 339 | 1 |
def is_isogram(string: str) -> bool:
    """An isogram repeats no letter; raise on non-alphabetic input."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(f"""{input_str} is {"an" if isogram else "not an"} isogram.""")
| 339 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 339 | 1 |
import os
UpperCAmelCase__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral to an integer; subtractive pairs are handled by lookahead."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Convert an integer to its minimal-length Roman numeral."""
    numerals = ""
    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def A ( _UpperCAmelCase : str = "/p089_roman.txt" ) -> int:
'''simple docstring'''
_UpperCAmelCase = 0
with open(os.path.dirname(_UpperCAmelCase ) + roman_numerals_filename ) as filea:
_UpperCAmelCase = filea.readlines()
for line in lines:
_UpperCAmelCase = line.strip()
_UpperCAmelCase = parse_roman_numerals(_UpperCAmelCase )
_UpperCAmelCase = generate_roman_numerals(_UpperCAmelCase )
savings += len(_UpperCAmelCase ) - len(_UpperCAmelCase )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Primality test using the 6k +/- 1 optimization."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
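# Why 6k +/- 1 (explanatory note, added): every integer is 6k + r for some
# r in {0, 1, 2, 3, 4, 5}; r in {0, 2, 4} gives a multiple of 2 and r == 3 a
# multiple of 3, so once 2 and 3 are ruled out, only 6k - 1 and 6k + 1 remain.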
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.", )
        self.assertFalse(
            is_prime(1), "One only has 1 positive factor, primes must have exactly two.", )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 339 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for `split` and save them as JSON in `output_dir`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f" {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
    # use task specific params
    use_task_specific_params(model, data_args.task)
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)
        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 339 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCAmelCase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
UpperCAmelCase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
UpperCAmelCase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
                }), )
    def _compute(
        self, predictions: List[List[List[str]]], references: List[List[str]], min_len: int = 1, max_len: int = 4, ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
| 339 | 1 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
UpperCAmelCase__ = "https://cdn.huggingface.co"
UpperCAmelCase__ = "https://s3.amazonaws.com/models.huggingface.co/bert"
UpperCAmelCase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
UpperCAmelCase__ = os.path.join(PATH, "config.yaml")
UpperCAmelCase__ = os.path.join(PATH, "attributes.txt")
UpperCAmelCase__ = os.path.join(PATH, "objects.txt")
UpperCAmelCase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
UpperCAmelCase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
UpperCAmelCase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
UpperCAmelCase__ = "pytorch_model.bin"
UpperCAmelCase__ = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def A ( _UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = OrderedDict()
with open(_UpperCAmelCase , 'rb' ) as f:
_UpperCAmelCase = pkl.load(_UpperCAmelCase )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
_UpperCAmelCase = ckp.pop(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , np.ndarray ):
_UpperCAmelCase = torch.tensor(_UpperCAmelCase )
else:
            assert isinstance(_UpperCAmelCase , torch.Tensor ), type(_UpperCAmelCase )
_UpperCAmelCase = v
return r
class __lowerCAmelCase :
UpperCamelCase = {}
def __init__( self : Union[str, Any] , A : dict , A : str = "root" , A : Union[str, Any]=0) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = name
_UpperCAmelCase = level
_UpperCAmelCase = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_UpperCAmelCase = copy.deepcopy(A)
_UpperCAmelCase = copy.deepcopy(A)
if isinstance(A , A):
_UpperCAmelCase = Config(A , name=A , level=level + 1)
_UpperCAmelCase = v
setattr(self , A , A)
_UpperCAmelCase = d
def __repr__( self : int) -> Optional[int]:
"""simple docstring"""
return str(list((self._pointer.keys())))
def __setattr__( self : int , A : Dict , A : Union[str, Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = val
_UpperCAmelCase = val
_UpperCAmelCase = key.split('.')
_UpperCAmelCase = len(A) - 1
_UpperCAmelCase = self._pointer
if len(A) > 1:
for i, l in enumerate(A):
if hasattr(self , A) and isinstance(getattr(self , A) , A):
setattr(getattr(self , A) , '.'.join(levels[i:]) , A)
if l == last_level:
_UpperCAmelCase = val
else:
_UpperCAmelCase = pointer[l]
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
return self._pointer
def _lowerCamelCase ( self : Optional[Any] , A : Dict , A : List[str]) -> str:
"""simple docstring"""
with open(F"{file_name}" , 'w') as stream:
dump(A , A)
def _lowerCamelCase ( self : Union[str, Any] , A : Tuple , A : Dict) -> str:
"""simple docstring"""
with open(F"{file_name}" , 'w') as stream:
json.dump(A , A)
@staticmethod
def _lowerCamelCase ( A : Optional[Any]) -> int:
"""simple docstring"""
with open(A) as stream:
_UpperCAmelCase = load(A , Loader=A)
return data
def __str__( self : List[str]) -> str:
"""simple docstring"""
_UpperCAmelCase = ' '
if self._name != "root":
_UpperCAmelCase = F"{t * (self._level-1)}{self._name}:\n"
else:
_UpperCAmelCase = ''
_UpperCAmelCase = self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(A , A):
r += F"{t * (self._level)}{v}\n"
self._level += 1
else:
r += F"{t * (self._level)}{k}: {v} ({type(A).__name__})\n"
_UpperCAmelCase = level
return r[:-1]
@classmethod
def _lowerCamelCase ( cls : Any , A : str , **A : List[str]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(A , **A)
return cls(A)
@classmethod
def _lowerCamelCase ( cls : str , A : str , **A : Tuple) -> Dict:
"""simple docstring"""
_UpperCAmelCase = kwargs.pop('cache_dir' , A)
_UpperCAmelCase = kwargs.pop('force_download' , A)
_UpperCAmelCase = kwargs.pop('resume_download' , A)
_UpperCAmelCase = kwargs.pop('proxies' , A)
_UpperCAmelCase = kwargs.pop('local_files_only' , A)
if os.path.isdir(A):
_UpperCAmelCase = os.path.join(A , A)
elif os.path.isfile(A) or is_remote_url(A):
_UpperCAmelCase = pretrained_model_name_or_path
else:
_UpperCAmelCase = hf_bucket_url(A , filename=A , use_cdn=A)
try:
# Load from URL or cache if already cached
_UpperCAmelCase = cached_path(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_UpperCAmelCase = Config.load_yaml(A)
except EnvironmentError:
            _UpperCAmelCase = F"Can't load config for '{pretrained_model_name_or_path}'. Make sure the name or path is correct and accessible."
raise EnvironmentError(A)
if resolved_config_file == config_file:
print('loading configuration file from path')
else:
print('loading configuration file cache')
return Config.load_yaml(A), kwargs
def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = torch.load('dump.pt' , map_location=in_tensor.device )
_UpperCAmelCase = in_tensor.numpy()
_UpperCAmelCase = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(_UpperCAmelCase , _UpperCAmelCase , rtol=0.01 , atol=0.1 ), (
F"{sum([1 for x in np.isclose(_UpperCAmelCase , _UpperCAmelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
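    # Debug helper: once the tensors match, deliberately raise so execution
    # stops here for inspection rather than continuing silently.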
raise Exception('tensors are all good' )
# Hugging face functions below
def A ( _UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = urlparse(_UpperCAmelCase )
return parsed.scheme in ("http", "https")
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any]=True ) -> str:
'''simple docstring'''
_UpperCAmelCase = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
_UpperCAmelCase = '/' not in model_id
if legacy_format:
return F"{endpoint}/{model_id}-{filename}"
else:
return F"{endpoint}/{model_id}/{filename}"
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : Union[str, Any]=None , ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
ua += "; " + "; ".join('{}/{}'.format(_UpperCAmelCase , _UpperCAmelCase ) for k, v in user_agent.items() )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
ua += "; " + user_agent
_UpperCAmelCase = {'user-agent': ua}
if resume_size > 0:
_UpperCAmelCase = 'bytes=%d-' % (resume_size,)
_UpperCAmelCase = requests.get(_UpperCAmelCase , stream=_UpperCAmelCase , proxies=_UpperCAmelCase , headers=_UpperCAmelCase )
if response.status_code == 416: # Range not satisfiable
return
_UpperCAmelCase = response.headers.get('Content-Length' )
_UpperCAmelCase = resume_size + int(_UpperCAmelCase ) if content_length is not None else None
_UpperCAmelCase = tqdm(
unit='B' , unit_scale=_UpperCAmelCase , total=_UpperCAmelCase , initial=_UpperCAmelCase , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(_UpperCAmelCase ) )
temp_file.write(_UpperCAmelCase )
progress.close()
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Union[str, Any]=10 , _UpperCAmelCase : int=False , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Any]=False , ) -> Union[str, Any]:
'''simple docstring'''
if cache_dir is None:
_UpperCAmelCase = TRANSFORMERS_CACHE
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = str(_UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_UpperCAmelCase = None
if not local_files_only:
try:
_UpperCAmelCase = requests.head(_UpperCAmelCase , allow_redirects=_UpperCAmelCase , proxies=_UpperCAmelCase , timeout=_UpperCAmelCase )
if response.status_code == 200:
_UpperCAmelCase = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_UpperCAmelCase = url_to_filename(_UpperCAmelCase , _UpperCAmelCase )
# get cache path to put the file
_UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_UpperCAmelCase ):
return cache_path
else:
_UpperCAmelCase = [
file
for file in fnmatch.filter(os.listdir(_UpperCAmelCase ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(_UpperCAmelCase ) > 0:
return os.path.join(_UpperCAmelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(_UpperCAmelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_UpperCAmelCase = cache_path + '.lock'
with FileLock(_UpperCAmelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(_UpperCAmelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_UpperCAmelCase = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(_UpperCAmelCase , 'a+b' ) as f:
yield f
_UpperCAmelCase = _resumable_file_manager
if os.path.exists(_UpperCAmelCase ):
_UpperCAmelCase = os.stat(_UpperCAmelCase ).st_size
else:
_UpperCAmelCase = 0
else:
_UpperCAmelCase = partial(tempfile.NamedTemporaryFile , dir=_UpperCAmelCase , delete=_UpperCAmelCase )
_UpperCAmelCase = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
        print(
            '%s not found in cache or force_download set to True, downloading to %s' % (_UpperCAmelCase , temp_file.name) )
http_get(
_UpperCAmelCase , _UpperCAmelCase , proxies=_UpperCAmelCase , resume_size=_UpperCAmelCase , user_agent=_UpperCAmelCase , )
os.replace(temp_file.name , _UpperCAmelCase )
_UpperCAmelCase = {'url': url, 'etag': etag}
_UpperCAmelCase = cache_path + '.json'
with open(_UpperCAmelCase , 'w' ) as meta_file:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
return cache_path
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any]=None ) -> Any:
'''simple docstring'''
_UpperCAmelCase = url.encode('utf-8' )
_UpperCAmelCase = shaaaa(_UpperCAmelCase )
_UpperCAmelCase = url_hash.hexdigest()
if etag:
_UpperCAmelCase = etag.encode('utf-8' )
_UpperCAmelCase = shaaaa(_UpperCAmelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
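# Hedged sketch of the cache naming scheme above: sha256 of the URL, plus a
# second sha256 of the ETag when one is known. The URL and ETag values are
# assumptions, and `shaaaa` follows this file's hashlib import alias.
def _demo_cache_filename() -> None:
    _url = 'https://example.com/model.bin'
    _etag = '"abc123"'
    _name = shaaaa(_url.encode('utf-8')).hexdigest()
    _name += '.' + shaaaa(_etag.encode('utf-8')).hexdigest()
    print(_name)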
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=False , _UpperCAmelCase : int=None , _UpperCAmelCase : Any=False , _UpperCAmelCase : Any=False , _UpperCAmelCase : str=False , ) -> List[str]:
'''simple docstring'''
if cache_dir is None:
_UpperCAmelCase = TRANSFORMERS_CACHE
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = str(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = str(_UpperCAmelCase )
if is_remote_url(_UpperCAmelCase ):
# URL, so get it from the cache (downloading if necessary)
_UpperCAmelCase = get_from_cache(
_UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , user_agent=_UpperCAmelCase , local_files_only=_UpperCAmelCase , )
elif os.path.exists(_UpperCAmelCase ):
# File, and it exists.
_UpperCAmelCase = url_or_filename
elif urlparse(_UpperCAmelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(_UpperCAmelCase ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(_UpperCAmelCase ) )
if extract_compressed_file:
if not is_zipfile(_UpperCAmelCase ) and not tarfile.is_tarfile(_UpperCAmelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_UpperCAmelCase , _UpperCAmelCase = os.path.split(_UpperCAmelCase )
_UpperCAmelCase = output_file.replace('.' , '-' ) + '-extracted'
_UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ) and os.listdir(_UpperCAmelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_UpperCAmelCase = output_path + '.lock'
with FileLock(_UpperCAmelCase ):
shutil.rmtree(_UpperCAmelCase , ignore_errors=_UpperCAmelCase )
os.makedirs(_UpperCAmelCase )
if is_zipfile(_UpperCAmelCase ):
with ZipFile(_UpperCAmelCase , 'r' ) as zip_file:
zip_file.extractall(_UpperCAmelCase )
zip_file.close()
elif tarfile.is_tarfile(_UpperCAmelCase ):
_UpperCAmelCase = tarfile.open(_UpperCAmelCase )
tar_file.extractall(_UpperCAmelCase )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(_UpperCAmelCase ) )
return output_path_extracted
return output_path
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any]="," ) -> Dict:
'''simple docstring'''
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if os.path.isfile(_UpperCAmelCase ):
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = eval(f.read() )
else:
_UpperCAmelCase = requests.get(_UpperCAmelCase )
try:
        _UpperCAmelCase = req.json()  # parse the response body; the bare requests module has no .json()
except Exception:
_UpperCAmelCase = req.content.decode()
assert data is not None, "could not connect"
try:
_UpperCAmelCase = eval(_UpperCAmelCase )
except Exception:
_UpperCAmelCase = data.split('\n' )
req.close()
return data
def A ( _UpperCAmelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = requests.get(_UpperCAmelCase )
_UpperCAmelCase = np.array(Image.open(BytesIO(response.content ) ) )
return img
def A ( _UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_UpperCAmelCase )
with open(_UpperCAmelCase , 'rb' ) as stream:
_UpperCAmelCase = pkl.load(_UpperCAmelCase )
_UpperCAmelCase = weights.pop('model' )
_UpperCAmelCase = {}
for k, v in model.items():
_UpperCAmelCase = torch.from_numpy(_UpperCAmelCase )
if "running_var" in k:
_UpperCAmelCase = torch.tensor([0] )
_UpperCAmelCase = k.replace('running_var' , 'num_batches_tracked' )
_UpperCAmelCase = zero
return new
def A ( ) -> str:
'''simple docstring'''
print(F"{os.path.abspath(os.path.join(_UpperCAmelCase , os.pardir ) )}/demo.ipynb" )
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]="RGB" ) -> int:
'''simple docstring'''
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if os.path.isfile(_UpperCAmelCase ):
_UpperCAmelCase = cva.imread(_UpperCAmelCase )
else:
_UpperCAmelCase = get_image_from_url(_UpperCAmelCase )
assert img is not None, F"could not connect to: {im}"
_UpperCAmelCase = cva.cvtColor(_UpperCAmelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCAmelCase = img[:, :, ::-1]
return img
def A ( _UpperCAmelCase : str , _UpperCAmelCase : List[Any]=1 ) -> List[Any]:
'''simple docstring'''
return (images[i : i + batch] for i in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ))
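# Sketch (sizes assumed): chunking a list of five images with batch=2 yields
# groups of sizes 2, 2 and 1.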
| 339 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
UpperCAmelCase__ = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
UpperCAmelCase__ = TaTokenizerFast
UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
UpperCAmelCase__ = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 339 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = 'ylacombe/bark-small'
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = 'en_speaker_1'
_UpperCAmelCase = 'This is a test string'
_UpperCAmelCase = 'speaker_embeddings_path.json'
_UpperCAmelCase = 'speaker_embeddings'
def _lowerCamelCase ( self : List[str] , **A : Any) -> Optional[Any]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **A)
def _lowerCamelCase ( self : int) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def _lowerCamelCase ( self : str) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BarkProcessor(tokenizer=A)
processor.save_pretrained(self.tmpdirname)
_UpperCAmelCase = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def _lowerCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
_UpperCAmelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_UpperCAmelCase = 35
_UpperCAmelCase = 2
_UpperCAmelCase = 8
_UpperCAmelCase = {
'semantic_prompt': np.ones(A),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
_UpperCAmelCase = processor(text=self.input_string , voice_preset=A)
_UpperCAmelCase = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(A , np.array([])).tolist())
# test loading voice preset from npz file
_UpperCAmelCase = os.path.join(self.tmpdirname , 'file.npz')
np.savez(A , **A)
_UpperCAmelCase = processor(text=self.input_string , voice_preset=A)
_UpperCAmelCase = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(A , np.array([])).tolist())
# test loading voice preset from the hub
_UpperCAmelCase = processor(text=self.input_string , voice_preset=self.voice_preset)
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = BarkProcessor(tokenizer=A)
_UpperCAmelCase = processor(text=self.input_string)
_UpperCAmelCase = tokenizer(
self.input_string , padding='max_length' , max_length=2_56 , add_special_tokens=A , return_attention_mask=A , return_token_type_ids=A , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
| 339 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''open-llama'''
def __init__( self : str , A : List[Any]=10_00_00 , A : Tuple=40_96 , A : Tuple=1_10_08 , A : List[str]=32 , A : Tuple=32 , A : Optional[Any]="silu" , A : int=20_48 , A : Optional[Any]=0.0_2 , A : Dict=1E-6 , A : Optional[Any]=True , A : List[Any]=0 , A : Dict=1 , A : int=2 , A : Dict=False , A : Optional[int]=True , A : List[Any]=0.1 , A : str=0.1 , A : Dict=True , A : Optional[Any]=True , A : Dict=None , **A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = rms_norm_eps
_UpperCAmelCase = use_cache
_UpperCAmelCase = kwargs.pop(
'use_memorry_efficient_attention' , A)
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_dropout_prob
_UpperCAmelCase = use_stable_embedding
_UpperCAmelCase = shared_input_output_embedding
_UpperCAmelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , A) or len(self.rope_scaling) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F"got {self.rope_scaling}")
_UpperCAmelCase = self.rope_scaling.get('type' , A)
_UpperCAmelCase = self.rope_scaling.get('factor' , A)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(A , A) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 339 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
UpperCAmelCase__ = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = DistilBertTokenizer
def __init__( self : Any , A : List[Any]=None , A : Tuple=None , A : List[Any]=True , A : Union[str, Any]="[UNK]" , A : Optional[Any]="[SEP]" , A : str="[PAD]" , A : Any="[CLS]" , A : Optional[int]="[MASK]" , A : Any=True , A : List[Any]=None , **A : int , ) -> List[str]:
"""simple docstring"""
super().__init__(
A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase' , A) != do_lower_case
or normalizer_state.get('strip_accents' , A) != strip_accents
or normalizer_state.get('handle_chinese_chars' , A) != tokenize_chinese_chars
):
_UpperCAmelCase = getattr(A , normalizer_state.pop('type'))
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = strip_accents
_UpperCAmelCase = tokenize_chinese_chars
_UpperCAmelCase = normalizer_class(**A)
_UpperCAmelCase = do_lower_case
def _lowerCamelCase ( self : Optional[Any] , A : Optional[int] , A : Optional[Any]=None) -> Dict:
"""simple docstring"""
_UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowerCamelCase ( self : Optional[Any] , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _lowerCamelCase ( self : str , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
_UpperCAmelCase = self._tokenizer.model.save(A , name=A)
return tuple(A)
| 339 |
def A ( _UpperCAmelCase : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def A ( _UpperCAmelCase : str ) -> bool:
'''simple docstring'''
_UpperCAmelCase = credit_card_number
_UpperCAmelCase = 0
_UpperCAmelCase = len(_UpperCAmelCase ) - 2
for i in range(_UpperCAmelCase , -1 , -2 ):
# double the value of every second digit
_UpperCAmelCase = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
_UpperCAmelCase = cc_number[:i] + str(_UpperCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
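# Worked sketch on an assumed number "12345": every second digit from the
# right (4 and 2) doubles to 8 and 4, neither exceeds 9, so the total is
# 8 + 4 + 1 + 3 + 5 = 21, and 21 % 10 != 0 means "12345" fails the check.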
def A ( _UpperCAmelCase : str ) -> bool:
'''simple docstring'''
_UpperCAmelCase = F"{credit_card_number} is an invalid credit card number because"
if not credit_card_number.isdigit():
print(F"{error_message} it has nonnumerical characters." )
return False
if not 13 <= len(_UpperCAmelCase ) <= 16:
print(F"{error_message} of its length." )
return False
if not validate_initial_digits(_UpperCAmelCase ):
print(F"{error_message} of its first two digits." )
return False
if not luhn_validation(_UpperCAmelCase ):
print(F"{error_message} it fails the Luhn check." )
return False
print(F"{credit_card_number} is a valid credit card number." )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 339 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[Any] , A : Dict , A : Union[str, Any]=13 , A : Dict=7 , A : Dict=True , A : Tuple=True , A : Union[str, Any]=True , A : int=True , A : Optional[int]=99 , A : List[str]=32 , A : List[Any]=5 , A : int=4 , A : Any=37 , A : Optional[int]="gelu" , A : Optional[Any]=0.1 , A : Any=0.1 , A : Union[str, Any]=5_12 , A : int=16 , A : List[str]=2 , A : Union[str, Any]=0.0_2 , A : Union[str, Any]=4 , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_choices
def _lowerCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_attention_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCAmelCase = FlaxRoFormerModelTester(self)
@slow
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=A)
_UpperCAmelCase = model(np.ones((1, 1)))
self.assertIsNotNone(A)
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
_UpperCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]])
_UpperCAmelCase = model(A)[0]
_UpperCAmelCase = 5_00_00
_UpperCAmelCase = (1, 6, vocab_size)
self.assertEqual(output.shape , A)
_UpperCAmelCase = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]])
self.assertTrue(jnp.allclose(output[:, :3, :3] , A , atol=1E-4))
| 339 |
from functools import reduce
UpperCAmelCase__ = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def A ( _UpperCAmelCase : str = N ) -> int:
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda _UpperCAmelCase , _UpperCAmelCase : str(int(_UpperCAmelCase ) * int(_UpperCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(_UpperCAmelCase ) - 12 ) )
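# Equivalent sketch of the same search without functools.reduce; the window
# width of 13 matches the slice length used above.
def _demo_window_product(digits: str = N, width: int = 13) -> int:
    best = 0
    for i in range(len(digits) - width + 1):
        prod = 1
        for ch in digits[i : i + width]:
            prod *= int(ch)
        best = max(best, prod)
    return best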
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = inspect.getfile(accelerate.test_utils)
_UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
_UpperCAmelCase = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
_UpperCAmelCase = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])
@require_multi_gpu
def _lowerCamelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
print(F"Found {torch.cuda.device_count()} devices.")
_UpperCAmelCase = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(A , env=os.environ.copy())
@require_multi_gpu
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
print(F"Found {torch.cuda.device_count()} devices.")
_UpperCAmelCase = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(F"Command: {cmd}")
with patch_environment(omp_num_threads=1):
execute_subprocess_async(A , env=os.environ.copy())
@require_multi_gpu
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(A , env=os.environ.copy())
@require_multi_gpu
def _lowerCamelCase ( self : Any) -> Tuple:
"""simple docstring"""
print(F"Found {torch.cuda.device_count()} devices, using 2 devices only")
_UpperCAmelCase = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1'):
execute_subprocess_async(A , env=os.environ.copy())
if __name__ == "__main__":
UpperCAmelCase__ = Accelerator()
UpperCAmelCase__ = (accelerator.state.process_index + 2, 10)
UpperCAmelCase__ = torch.randint(0, 10, shape).to(accelerator.device)
UpperCAmelCase__ = ""
UpperCAmelCase__ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCAmelCase__ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCAmelCase__ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 339 |
from __future__ import annotations
from collections.abc import Callable
UpperCAmelCase__ = list[list[float | int]]
def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : Matrix ) -> Matrix:
'''simple docstring'''
_UpperCAmelCase = len(_UpperCAmelCase )
_UpperCAmelCase = [[0 for _ in range(size + 1 )] for _ in range(_UpperCAmelCase )]
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for row in range(_UpperCAmelCase ):
for col in range(_UpperCAmelCase ):
_UpperCAmelCase = matrix[row][col]
_UpperCAmelCase = vector[row][0]
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while row < size and col < size:
# pivoting
_UpperCAmelCase = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_UpperCAmelCase , _UpperCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_UpperCAmelCase , _UpperCAmelCase = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _UpperCAmelCase ):
_UpperCAmelCase = augmented[rowa][col] / augmented[row][col]
_UpperCAmelCase = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _UpperCAmelCase ):
for row in range(_UpperCAmelCase ):
_UpperCAmelCase = augmented[row][col] / augmented[col][col]
for cola in range(_UpperCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_UpperCAmelCase )
]
def A ( _UpperCAmelCase : list[int] ) -> Callable[[int], int]:
'''simple docstring'''
_UpperCAmelCase = len(_UpperCAmelCase )
_UpperCAmelCase = [[0 for _ in range(_UpperCAmelCase )] for _ in range(_UpperCAmelCase )]
_UpperCAmelCase = [[0] for _ in range(_UpperCAmelCase )]
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for x_val, y_val in enumerate(_UpperCAmelCase ):
for col in range(_UpperCAmelCase ):
_UpperCAmelCase = (x_val + 1) ** (size - col - 1)
_UpperCAmelCase = y_val
_UpperCAmelCase = solve(_UpperCAmelCase , _UpperCAmelCase )
def interpolated_func(_UpperCAmelCase : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_UpperCAmelCase ) )
return interpolated_func
def A ( _UpperCAmelCase : int ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
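# Sanity sketch: evaluating the alternating series above at 1, 2 and 3 gives
# 1, 683 and 44287, matching the u(n) values quoted in the problem statement.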
def A ( _UpperCAmelCase : Callable[[int], int] = question_function , _UpperCAmelCase : int = 10 ) -> int:
'''simple docstring'''
_UpperCAmelCase = [func(_UpperCAmelCase ) for x_val in range(1 , order + 1 )]
_UpperCAmelCase = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_UpperCAmelCase = 0
_UpperCAmelCase = 42
_UpperCAmelCase = 42
for poly in polynomials:
_UpperCAmelCase = 1
while func(_UpperCAmelCase ) == poly(_UpperCAmelCase ):
x_val += 1
ret += poly(_UpperCAmelCase )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 1 |
from __future__ import annotations
def A ( _UpperCAmelCase : int ) -> list[int]:
'''simple docstring'''
_UpperCAmelCase = [True] * limit
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
_UpperCAmelCase = i * 2
while index < limit:
_UpperCAmelCase = False
_UpperCAmelCase = index + i
_UpperCAmelCase = [2]
for i in range(3 , _UpperCAmelCase , 2 ):
if is_prime[i]:
primes.append(_UpperCAmelCase )
return primes
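# Sanity sketch: with limit=30 the sieve above yields
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].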
def A ( _UpperCAmelCase : int = 1_000_000 ) -> int:
'''simple docstring'''
_UpperCAmelCase = prime_sieve(_UpperCAmelCase )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
for i in range(len(_UpperCAmelCase ) ):
for j in range(i + length , len(_UpperCAmelCase ) ):
_UpperCAmelCase = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
_UpperCAmelCase = j - i
_UpperCAmelCase = sol
return largest
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 |
from __future__ import annotations
def A ( _UpperCAmelCase : list[int] ) -> bool:
'''simple docstring'''
return len(set(_UpperCAmelCase ) ) == len(_UpperCAmelCase )
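# Sketch: set() collapses repeats, so equal lengths mean every element is
# unique, e.g. [1, 2, 3] -> True and [1, 2, 2] -> False (inputs assumed).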
if __name__ == "__main__":
import doctest
doctest.testmod()
| 339 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __lowerCAmelCase ( A ):
def _lowerCamelCase ( self : Any , A : str) -> Optional[Any]:
"""simple docstring"""
with open(A , encoding='utf-8') as input_file:
_UpperCAmelCase = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
_UpperCAmelCase = input_file.read()
_UpperCAmelCase = regexp.search(A)
return match
def _lowerCamelCase ( self : Tuple , A : str) -> Optional[int]:
"""simple docstring"""
with open(A , encoding='utf-8') as input_file:
_UpperCAmelCase = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL)
_UpperCAmelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_UpperCAmelCase = regexp.finditer(A)
_UpperCAmelCase = [match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = Path('./datasets')
_UpperCAmelCase = list(dataset_paths.absolute().glob('**/*.py'))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(A)):
raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}")
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = Path('./datasets')
_UpperCAmelCase = list(dataset_paths.absolute().glob('**/*.py'))
for dataset in dataset_files:
if self._no_print_statements(str(A)):
raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 339 |
import os
UpperCAmelCase__ = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def A ( _UpperCAmelCase : str ) -> int:
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while index < len(_UpperCAmelCase ) - 1:
_UpperCAmelCase = SYMBOLS[numerals[index]]
_UpperCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def A ( _UpperCAmelCase : int ) -> str:
'''simple docstring'''
_UpperCAmelCase = ''
_UpperCAmelCase = num // 1_000
numerals += m_count * "M"
num %= 1_000
_UpperCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
_UpperCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
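# Worked sketch: for 1994 the builder above emits "M" (1000), "CM" (900),
# "XC" (90) and "IV" (4), producing the minimal form "MCMXCIV".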
def A ( _UpperCAmelCase : str = "/p089_roman.txt" ) -> int:
'''simple docstring'''
_UpperCAmelCase = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename ) as filea:
_UpperCAmelCase = filea.readlines()
for line in lines:
_UpperCAmelCase = line.strip()
_UpperCAmelCase = parse_roman_numerals(_UpperCAmelCase )
_UpperCAmelCase = generate_roman_numerals(_UpperCAmelCase )
savings += len(_UpperCAmelCase ) - len(_UpperCAmelCase )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCAmelCase ( A ):
UpperCamelCase = '''ClapFeatureExtractor'''
UpperCamelCase = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : Optional[int] , A : Any , A : Optional[Any]) -> Any:
"""simple docstring"""
super().__init__(A , A)
def __call__( self : List[Any] , A : List[Any]=None , A : Optional[int]=None , A : Any=None , **A : Any) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = kwargs.pop('sampling_rate' , A)
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.')
if text is not None:
_UpperCAmelCase = self.tokenizer(A , return_tensors=A , **A)
if audios is not None:
_UpperCAmelCase = self.feature_extractor(
A , sampling_rate=A , return_tensors=A , **A)
if text is not None and audios is not None:
            # merge the audio features into the text encoding that is returned below
            encoding['input_features'] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A) , tensor_type=A)
def _lowerCamelCase ( self : int , *A : Union[str, Any] , **A : Dict) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*A , **A)
def _lowerCamelCase ( self : Optional[Any] , *A : Optional[int] , **A : Tuple) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*A , **A)
@property
def _lowerCamelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 339 |
import requests
from bsa import BeautifulSoup
def A ( _UpperCAmelCase : str , _UpperCAmelCase : dict ) -> str:
'''simple docstring'''
_UpperCAmelCase = BeautifulSoup(requests.get(_UpperCAmelCase , params=_UpperCAmelCase ).content , 'html.parser' )
_UpperCAmelCase = soup.find('div' , attrs={'class': 'gs_ri'} )
_UpperCAmelCase = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
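# Hedged sketch: the same selector chain applied to canned HTML. The markup
# below is an assumption for illustration only; Google Scholar's real markup
# can change at any time.
def _demo_citation_selector() -> str:
    _html = (
        '<div class="gs_ri"><div class="gs_fl">'
        '<a>x</a><a>y</a><a>Cited by 42</a>'
        '</div></div>'
    )
    _div = BeautifulSoup(_html, 'html.parser').find('div', attrs={'class': 'gs_ri'})
    _anchors = _div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return _anchors[2].get_text()  # -> "Cited by 42"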
if __name__ == "__main__":
UpperCAmelCase__ = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 339 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''convbert'''
def __init__( self : Optional[Any] , A : Union[str, Any]=3_05_22 , A : Union[str, Any]=7_68 , A : Any=12 , A : str=12 , A : Optional[int]=30_72 , A : Union[str, Any]="gelu" , A : str=0.1 , A : Union[str, Any]=0.1 , A : Any=5_12 , A : str=2 , A : List[str]=0.0_2 , A : List[str]=1E-12 , A : List[Any]=1 , A : List[Any]=0 , A : Union[str, Any]=2 , A : Optional[Any]=7_68 , A : Optional[int]=2 , A : Tuple=9 , A : List[Any]=1 , A : Any=None , **A : Tuple , ) -> Tuple:
"""simple docstring"""
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , **A , )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = embedding_size
_UpperCAmelCase = head_ratio
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = num_groups
_UpperCAmelCase = classifier_dropout
class __lowerCAmelCase ( A ):
@property
def _lowerCamelCase ( self : Any) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
| 339 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[Any] , A : Dict , A : Union[str, Any]=13 , A : Dict=7 , A : Dict=True , A : Tuple=True , A : Union[str, Any]=True , A : int=True , A : Optional[int]=99 , A : List[str]=32 , A : List[Any]=5 , A : int=4 , A : Any=37 , A : Optional[int]="gelu" , A : Optional[Any]=0.1 , A : Any=0.1 , A : Union[str, Any]=5_12 , A : int=16 , A : List[str]=2 , A : Union[str, Any]=0.0_2 , A : Union[str, Any]=4 , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_choices
def _lowerCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_attention_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = True
UpperCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCAmelCase = FlaxRoFormerModelTester(self)
@slow
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=A)
_UpperCAmelCase = model(np.ones((1, 1)))
self.assertIsNotNone(A)
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
_UpperCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]])
_UpperCAmelCase = model(A)[0]
_UpperCAmelCase = 5_00_00
_UpperCAmelCase = (1, 6, vocab_size)
self.assertEqual(output.shape , A)
_UpperCAmelCase = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]])
self.assertTrue(jnp.allclose(output[:, :3, :3] , A , atol=1E-4))
| 339 | 1 |
def A ( _UpperCAmelCase : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def A ( _UpperCAmelCase : str ) -> bool:
'''simple docstring'''
_UpperCAmelCase = credit_card_number
_UpperCAmelCase = 0
_UpperCAmelCase = len(_UpperCAmelCase ) - 2
for i in range(_UpperCAmelCase , -1 , -2 ):
# double the value of every second digit
_UpperCAmelCase = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
_UpperCAmelCase = cc_number[:i] + str(_UpperCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
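# A minimal, readably-named sketch of the same Luhn check, for illustration only;
# `luhn_check` is a hypothetical helper, not part of this module's API, and it
# assumes the input string contains digits only.
def luhn_check(number: str) -> bool:
    total = 0
    for offset, char in enumerate(reversed(number)):
        digit = int(char)
        if offset % 2 == 1:  # every second digit, counting from the right
            digit *= 2
            if digit > 9:
                digit -= 9  # same as summing the two digits of the product
        total += digit
    return total % 10 == 0
# e.g. luhn_check("4111111111111111") is True, luhn_check("4111111111111112") is False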
def A ( _UpperCAmelCase : str ) -> bool:
'''simple docstring'''
_UpperCAmelCase = F"{credit_card_number} is an invalid credit card number because"
if not credit_card_number.isdigit():
print(F"{error_message} it has nonnumerical characters." )
return False
if not 13 <= len(_UpperCAmelCase ) <= 16:
print(F"{error_message} of its length." )
return False
if not validate_initial_digits(_UpperCAmelCase ):
print(F"{error_message} of its first two digits." )
return False
if not luhn_validation(_UpperCAmelCase ):
print(F"{error_message} it fails the Luhn check." )
return False
print(F"{credit_card_number} is a valid credit card number." )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 339 |
UpperCAmelCase__ = {}
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
_UpperCAmelCase = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
_UpperCAmelCase = _calculate(days - 1 , _UpperCAmelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
_UpperCAmelCase = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
_UpperCAmelCase = _calculate(days - 1 , _UpperCAmelCase , 0 )
_UpperCAmelCase = state_late + state_absent + state_ontime
_UpperCAmelCase = prizestrings
return prizestrings
def A ( _UpperCAmelCase : int = 30 ) -> int:
'''simple docstring'''
return _calculate(_UpperCAmelCase , absent=0 , late=0 )
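# An iterative sketch of the same count, for illustration only; `count_prize_strings`
# is a hypothetical name. It walks day by day over (absences so far, current late
# streak) states, assuming the same rules as `_calculate` above.
def count_prize_strings(days: int) -> int:
    states = {(0, 0): 1}  # (absent, consecutive_late) -> number of prize strings
    for _ in range(days):
        next_states: dict = {}
        for (absent, late), ways in states.items():
            # on time: the late streak resets
            next_states[(absent, 0)] = next_states.get((absent, 0), 0) + ways
            # late: allowed only if it would not be the third consecutive late day
            if late < 2:
                key = (absent, late + 1)
                next_states[key] = next_states.get(key, 0) + ways
            # absent: allowed only if this is the first absence
            if absent < 1:
                key = (absent + 1, 0)
                next_states[key] = next_states.get(key, 0) + ways
        states = next_states
    return sum(states.values())
# e.g. count_prize_strings(4) == 43, matching _calculate(4, 0, 0)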
if __name__ == "__main__":
print(solution())
| 339 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
UpperCAmelCase__ = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
UpperCAmelCase__ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCAmelCase__ = dict(zip(vocab, range(len(vocab))))
UpperCAmelCase__ = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__ = Path(tmpdirname)
UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
UpperCAmelCase__ = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
UpperCAmelCase__ = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
UpperCAmelCase__ = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
UpperCAmelCase__ = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
UpperCAmelCase__ = tokenizer(["Making tiny model"], return_tensors="pt")
UpperCAmelCase__ = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
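# For illustration only: a greedy BPE sketch showing how merge rules such as
# "l o" -> "lo" and "lo w" -> "low" (from `merges` above) combine symbols.
# `bpe` is a hypothetical helper, much simplified from the real tokenizer, and
# it assumes merge priority follows file order.
def bpe(word, merge_rules):
    ranks = {tuple(rule.split()[:2]): i for i, rule in enumerate(merge_rules) if rule}
    while len(word) > 1:
        pairs = [(word[i], word[i + 1]) for i in range(len(word) - 1)]
        candidates = [p for p in pairs if p in ranks]
        if not candidates:
            break
        best = min(candidates, key=ranks.get)
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(word[i] + word[i + 1])
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return word
# e.g. bpe(["l", "o", "w"], merges) -> ["low"]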
| 339 |
import os
import sys
import unittest
UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCAmelCase__ = os.path.join(git_repo_path, "src", "diffusers")
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = find_backend(' if not is_torch_available():')
self.assertEqual(A , 'torch')
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_UpperCAmelCase = find_backend(' if not (is_torch_available() and is_transformers_available()):')
self.assertEqual(A , 'torch_and_transformers')
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_UpperCAmelCase = find_backend(
' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):')
self.assertEqual(A , 'torch_and_transformers_and_onnx')
def _lowerCamelCase ( self : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('torch' , A)
self.assertIn('torch_and_transformers' , A)
self.assertIn('flax_and_transformers' , A)
self.assertIn('torch_and_transformers_and_onnx' , A)
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'])
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'])
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'])
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'])
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'])
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'])
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = create_dummy_object('CONSTANT' , '\'torch\'')
self.assertEqual(A , '\nCONSTANT = None\n')
_UpperCAmelCase = create_dummy_object('function' , '\'torch\'')
self.assertEqual(
A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n')
_UpperCAmelCase = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
_UpperCAmelCase = create_dummy_object('FakeClass' , '\'torch\'')
self.assertEqual(A , A)
def _lowerCamelCase ( self : Dict) -> int:
"""simple docstring"""
_UpperCAmelCase = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
_UpperCAmelCase = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
self.assertEqual(dummy_files['torch'] , A)
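# For illustration only: a hedged sketch of the dummy-object pattern that the
# generated strings above encode. The names below are hypothetical stand-ins for
# diffusers' real `DummyObject`/`requires_backends`, which also special-case
# private attribute access.
class _IllustrativeDummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")
class _IllustrativeFakeClass(metaclass=_IllustrativeDummyMeta):
    _backends = ["torch"]
# Accessing e.g. _IllustrativeFakeClass.from_pretrained raises ImportError until
# the backend is installed, which is exactly what the generated dummy modules provide.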
| 339 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( A ):
UpperCamelCase = (EulerDiscreteScheduler,)
UpperCamelCase = 1_0
def _lowerCamelCase ( self : Optional[Any] , **A : int) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = {
'num_train_timesteps': 11_00,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
}
config.update(**A)
return config
def _lowerCamelCase ( self : str) -> str:
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=A)
def _lowerCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=A , beta_end=A)
def _lowerCamelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A)
def _lowerCamelCase ( self : Tuple) -> Any:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A)
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**A)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCAmelCase = torch.manual_seed(0)
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase = sample.to(A)
for i, t in enumerate(scheduler.timesteps):
_UpperCAmelCase = scheduler.scale_model_input(A , A)
_UpperCAmelCase = model(A , A)
_UpperCAmelCase = scheduler.step(A , A , A , generator=A)
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(A))
_UpperCAmelCase = torch.mean(torch.abs(A))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(prediction_type='v_prediction')
_UpperCAmelCase = scheduler_class(**A)
scheduler.set_timesteps(self.num_inference_steps)
_UpperCAmelCase = torch.manual_seed(0)
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase = sample.to(A)
for i, t in enumerate(scheduler.timesteps):
_UpperCAmelCase = scheduler.scale_model_input(A , A)
_UpperCAmelCase = model(A , A)
_UpperCAmelCase = scheduler.step(A , A , A , generator=A)
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(A))
_UpperCAmelCase = torch.mean(torch.abs(A))
assert abs(result_sum.item() - 0.0_0_0_2) < 1E-2
assert abs(result_mean.item() - 2.2676E-06) < 1E-3
def _lowerCamelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**A)
scheduler.set_timesteps(self.num_inference_steps , device=A)
_UpperCAmelCase = torch.manual_seed(0)
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_UpperCAmelCase = sample.to(A)
for t in scheduler.timesteps:
_UpperCAmelCase = scheduler.scale_model_input(A , A)
_UpperCAmelCase = model(A , A)
_UpperCAmelCase = scheduler.step(A , A , A , generator=A)
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(A))
_UpperCAmelCase = torch.mean(torch.abs(A))
assert abs(result_sum.item() - 1_0.0_8_0_7) < 1E-2
assert abs(result_mean.item() - 0.0_1_3_1) < 1E-3
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**A , use_karras_sigmas=A)
scheduler.set_timesteps(self.num_inference_steps , device=A)
_UpperCAmelCase = torch.manual_seed(0)
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_UpperCAmelCase = sample.to(A)
for t in scheduler.timesteps:
_UpperCAmelCase = scheduler.scale_model_input(A , A)
_UpperCAmelCase = model(A , A)
_UpperCAmelCase = scheduler.step(A , A , A , generator=A)
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(A))
_UpperCAmelCase = torch.mean(torch.abs(A))
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9) < 1E-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3) < 1E-3
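# A minimal, self-contained sketch of the sampling loop all the integration
# tests above exercise, assuming the standard diffusers scheduler API; the zero
# tensor is a stand-in for a real noise-prediction model.
def run_sampling_loop(scheduler, sample, num_inference_steps=10):
    scheduler.set_timesteps(num_inference_steps)
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample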
| 339 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
UpperCAmelCase__ = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , )
UpperCamelCase = field(
default=1_0_2_4 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''A csv or a json file containing the training data.'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''A csv or a json file containing the test data.'''} )
def _lowerCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
else:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def A ( ) -> Optional[int]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase )
datasets.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
    # For JSON files, this script will use the `statement` column for the input statement and the `table_text` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_UpperCAmelCase = {'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
_UpperCAmelCase = data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_UpperCAmelCase = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
_UpperCAmelCase = load_dataset('csv' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_UpperCAmelCase = load_dataset('json' , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_UpperCAmelCase = raw_datasets['train'].features['label'].names
_UpperCAmelCase = len(_UpperCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_UpperCAmelCase = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_UpperCAmelCase , )
_UpperCAmelCase = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_UpperCAmelCase = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCAmelCase = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_UpperCAmelCase = {'Refused': 0, 'Entailed': 1}
_UpperCAmelCase = {0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
_UpperCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_UpperCAmelCase : Union[str, Any] ):
# Tokenize the texts
def _convert_table_text_to_pandas(_UpperCAmelCase : Dict ):
_UpperCAmelCase = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
_UpperCAmelCase = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_UpperCAmelCase = examples['statement']
_UpperCAmelCase = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
_UpperCAmelCase = tokenizer(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase )
_UpperCAmelCase = examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
_UpperCAmelCase = raw_datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_UpperCAmelCase = raw_datasets['train']
if data_args.max_train_samples is not None:
_UpperCAmelCase = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_UpperCAmelCase = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_UpperCAmelCase = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
_UpperCAmelCase = raw_datasets['test']
if data_args.max_predict_samples is not None:
_UpperCAmelCase = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_UpperCAmelCase ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_UpperCAmelCase : EvalPrediction ):
_UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , _UpperCAmelCase ) else p.predictions
_UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCAmelCase = default_data_collator
    elif training_args.fp16:
_UpperCAmelCase = DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 )
else:
_UpperCAmelCase = None
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
_UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
_UpperCAmelCase = train_result.metrics
_UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase )
)
_UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _UpperCAmelCase )
trainer.save_metrics('train' , _UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate(eval_dataset=_UpperCAmelCase )
_UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase )
_UpperCAmelCase = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics('eval' , _UpperCAmelCase )
trainer.save_metrics('eval' , _UpperCAmelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
_UpperCAmelCase = predict_dataset.remove_columns('label' )
_UpperCAmelCase = trainer.predict(_UpperCAmelCase , metric_key_prefix='predict' ).predictions
_UpperCAmelCase = np.argmax(_UpperCAmelCase , axis=1 )
_UpperCAmelCase = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = label_list[item]
writer.write(F"{index}\t{item}\n" )
_UpperCAmelCase = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCAmelCase )
else:
trainer.create_model_card(**_UpperCAmelCase )
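def _example_table_conversion():
    # Hedged illustration (not used by the script) of the '#'-delimited table
    # format that `_convert_table_text_to_pandas` above consumes: the first row
    # holds the column names, the remaining rows hold the records.
    table_text = 'col1#col2\na#1\nb#2'
    rows = [row.split('#') for row in table_text.strip('\n').split('\n')]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])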
def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 339 | 1 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
def __init__( self : List[Any] , A : Tuple , A : Union[str, Any]=13 , A : Dict=30 , A : Any=2 , A : str=3 , A : Any=True , A : Tuple=True , A : Dict=32 , A : int=2 , A : Union[str, Any]=4 , A : List[str]=37 , A : int="gelu" , A : Optional[Any]=0.1 , A : Optional[int]=0.1 , A : List[Any]=10 , A : Dict=0.0_2 , A : Any=3 , A : str=None , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : List[str] , A : Dict , A : str , A : List[str]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = TFViTModel(config=A)
_UpperCAmelCase = model(A , training=A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image of a different size than the one specified in the config.
_UpperCAmelCase = self.image_size // 2
_UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
_UpperCAmelCase = model(A , interpolate_pos_encoding=A , training=A)
_UpperCAmelCase = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size))
def _lowerCamelCase ( self : int , A : List[Any] , A : Optional[Any] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = TFViTForImageClassification(A)
_UpperCAmelCase = model(A , labels=A , training=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # Test with an image of a different size than the one specified in the config.
_UpperCAmelCase = self.image_size // 2
_UpperCAmelCase = pixel_values[:, :, :image_size, :image_size]
_UpperCAmelCase = model(A , interpolate_pos_encoding=A , training=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = TFViTForImageClassification(A)
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCamelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = TFViTModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37)
def _lowerCamelCase ( self : str) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def _lowerCamelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='ViT does not use inputs_embeds')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , tf.keras.layers.Layer))
def _lowerCamelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
_UpperCAmelCase = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A)
def _lowerCamelCase ( self : str) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A)
@slow
def _lowerCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFViTModel.from_pretrained('google/vit-base-patch16-224')
self.assertIsNotNone(A)
def A ( ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
_UpperCAmelCase = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='tf')
# forward pass
_UpperCAmelCase = model(**A)
# verify the logits
_UpperCAmelCase = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , A)
_UpperCAmelCase = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6])
tf.debugging.assert_near(outputs.logits[0, :3] , A , atol=1E-4)
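# A quick sanity sketch of the sequence-length arithmetic used above (square
# images and patches assumed); `vit_seq_length` is an illustrative helper only.
def vit_seq_length(image_size: int, patch_size: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the prepended [CLS] token
# e.g. vit_seq_length(224, 16) == 197, the standard ViT-Base/16 sequence length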
| 339 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
_UpperCAmelCase = multiprocessing.Manager()
_UpperCAmelCase = manager.list()
_UpperCAmelCase = multiprocessing.Process(target=_UpperCAmelCase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def A ( _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
_UpperCAmelCase = shutil.rmtree
_UpperCAmelCase = os.rmdir
_UpperCAmelCase = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
_UpperCAmelCase = {}
with swallow_io():
with time_limit(_UpperCAmelCase ):
exec(_UpperCAmelCase , _UpperCAmelCase )
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(F"failed: {e}" )
# Needed for cleaning up.
_UpperCAmelCase = rmtree
_UpperCAmelCase = rmdir
_UpperCAmelCase = chdir
@contextlib.contextmanager
def A ( _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
def signal_handler(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ):
raise TimeoutException('Timed out!' )
signal.setitimer(signal.ITIMER_REAL , _UpperCAmelCase )
signal.signal(signal.SIGALRM , _UpperCAmelCase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def A ( ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = WriteOnlyStringIO()
with contextlib.redirect_stdout(_UpperCAmelCase ):
with contextlib.redirect_stderr(_UpperCAmelCase ):
with redirect_stdin(_UpperCAmelCase ):
yield
@contextlib.contextmanager
def A ( ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as dirname:
with chdir(_UpperCAmelCase ):
yield dirname
class __lowerCAmelCase ( A ):
pass
class __lowerCAmelCase ( io.StringIO ):
def _lowerCamelCase ( self : Tuple , *A : str , **A : Any) -> Any:
"""simple docstring"""
raise OSError
def _lowerCamelCase ( self : List[str] , *A : Optional[Any] , **A : Optional[Any]) -> Optional[int]:
"""simple docstring"""
raise OSError
def _lowerCamelCase ( self : str , *A : List[str] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *A : Optional[Any] , **A : List[str]) -> Optional[int]:
"""simple docstring"""
return False
class __lowerCAmelCase ( contextlib._RedirectStream ): # type: ignore
UpperCamelCase = '''stdin'''
@contextlib.contextmanager
def A ( _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if root == ".":
yield
return
_UpperCAmelCase = os.getcwd()
os.chdir(_UpperCAmelCase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str]=None ) -> Any:
'''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
_UpperCAmelCase = None
_UpperCAmelCase = None
import os
_UpperCAmelCase = '1'
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
import shutil
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
import subprocess
_UpperCAmelCase = None # type: ignore
_UpperCAmelCase = None
import sys
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
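# A hedged demo of the SIGALRM-based guard above (illustrative only), assuming
# the context manager is exported as `time_limit` as referenced in the executor.
# The alarm only fires in the main thread, which is one reason every check runs
# in its own multiprocessing.Process.
def _demo_time_limit():
    try:
        with time_limit(0.1):
            while True:
                pass
    except TimeoutException:
        return 'timed out'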
| 339 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(A )
class __lowerCAmelCase ( A ):
def __init__( self : List[str] , *A : Tuple , **A : str) -> Dict:
"""simple docstring"""
super().__init__(*A , **A)
requires_backends(self , 'vision')
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING)
def _lowerCamelCase ( self : Any , A : Optional[Any]=None , A : Union[str, Any]=None , A : Optional[int]=None) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = {}
if prompt is not None:
_UpperCAmelCase = prompt
if generate_kwargs is not None:
_UpperCAmelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_UpperCAmelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one')
_UpperCAmelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **A : Any) -> Dict:
"""simple docstring"""
return super().__call__(A , **A)
def _lowerCamelCase ( self : Any , A : Union[str, Any] , A : str=None) -> str:
"""simple docstring"""
_UpperCAmelCase = load_image(A)
if prompt is not None:
if not isinstance(A , A):
raise ValueError(
F"Received an invalid text input, got - {type(A)} - but expected a single string. "
'Note also that one single text can be provided for conditional image to text generation.')
_UpperCAmelCase = self.model.config.model_type
if model_type == "git":
_UpperCAmelCase = self.image_processor(images=A , return_tensors=self.framework)
_UpperCAmelCase = self.tokenizer(text=A , add_special_tokens=A).input_ids
_UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids
_UpperCAmelCase = torch.tensor(A).unsqueeze(0)
model_inputs.update({'input_ids': input_ids})
elif model_type == "pix2struct":
_UpperCAmelCase = self.image_processor(images=A , header_text=A , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_UpperCAmelCase = self.image_processor(images=A , return_tensors=self.framework)
_UpperCAmelCase = self.tokenizer(A , return_tensors=self.framework)
model_inputs.update(A)
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation")
else:
_UpperCAmelCase = self.image_processor(images=A , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
_UpperCAmelCase = None
return model_inputs
def _lowerCamelCase ( self : Union[str, Any] , A : int , A : Union[str, Any]=None) -> List[Any]:
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , A)
and all(x is None for x in model_inputs['input_ids'])
):
_UpperCAmelCase = None
if generate_kwargs is None:
_UpperCAmelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_UpperCAmelCase = model_inputs.pop(self.model.main_input_name)
_UpperCAmelCase = self.model.generate(A , **A , **A)
return model_outputs
def _lowerCamelCase ( self : Optional[int] , A : Dict) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = []
for output_ids in model_outputs:
_UpperCAmelCase = {
'generated_text': self.tokenizer.decode(
A , skip_special_tokens=A , )
}
records.append(A)
return records
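def _example_image_captioning():
    # Hedged usage sketch of this pipeline (illustrative only); the checkpoint
    # name is an assumption, and any image-to-text model on the Hub should work.
    from transformers import pipeline
    captioner = pipeline('image-to-text', model='nlpconnect/vit-gpt2-image-captioning')
    # returns e.g. [{'generated_text': '...'}]
    return captioner('https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png')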
| 339 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=False ) -> str:
'''simple docstring'''
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
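# A hedged demo of the helper above using a temporary, hypothetical environment
# variable (`MY_FLAG` is used only for this illustration).
def _demo_parse_flag():
    os.environ['MY_FLAG'] = 'yes'
    try:
        return parse_flag_from_env('MY_FLAG', default=False)  # -> truthy (1)
    finally:
        del os.environ['MY_FLAG']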
UpperCAmelCase__ = parse_flag_from_env("RUN_SLOW", default=False)
def A ( _UpperCAmelCase : List[str] ) -> List[str]:
'''simple docstring'''
return unittest.skip('Test was skipped' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> str:
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : str ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Tuple ) -> int:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict:
'''simple docstring'''
if test_case is None:
return partial(_UpperCAmelCase , version=_UpperCAmelCase )
return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> int:
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase )
UpperCAmelCase__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A ( _UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase )
class __lowerCAmelCase ( unittest.TestCase ):
UpperCamelCase = True
@classmethod
def _lowerCamelCase ( cls : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = tempfile.mkdtemp()
@classmethod
def _lowerCamelCase ( cls : Union[str, Any]) -> str:
"""simple docstring"""
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def _lowerCamelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir).glob('**/*'):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(A)
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[int] , A : Union[mock.Mock, List[mock.Mock]]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = mocks if isinstance(A , (tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = AcceleratorState()
_UpperCAmelCase = tensor[None].clone().to(state.device )
_UpperCAmelCase = gather(_UpperCAmelCase ).cpu()
_UpperCAmelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _UpperCAmelCase ):
return False
return True
class __lowerCAmelCase :
def __init__( self : Optional[Any] , A : Union[str, Any] , A : Optional[int] , A : str) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def A ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_UpperCAmelCase )
else:
break
async def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]=False ) -> _RunOutput:
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(_UpperCAmelCase ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str="" ):
_UpperCAmelCase = line.decode('utf-8' ).rstrip()
sink.append(_UpperCAmelCase )
if not quiet:
print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:' ) ) ),
] , timeout=_UpperCAmelCase , )
return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=180 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : List[Any]=True ) -> _RunOutput:
'''simple docstring'''
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase ) )
_UpperCAmelCase = ' '.join(_UpperCAmelCase )
if result.returncode > 0:
_UpperCAmelCase = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class __lowerCAmelCase ( A ):
pass
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=False ) -> Tuple:
'''simple docstring'''
try:
_UpperCAmelCase = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_UpperCAmelCase , 'decode' ):
_UpperCAmelCase = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(_UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 339 | 1 |
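The row above ends with accelerate's subprocess helpers: stdout and stderr are streamed line by line through `tee` callbacks instead of `communicate()`, so worker output stays visible even if the process hangs. A minimal standalone sketch of that pattern (names like `run` and `_tee` are illustrative, not from the snippet):

import asyncio
import sys

async def _tee(stream, label, out_file, sink):
    # Drain one line at a time so output is visible even if the process hangs.
    while True:
        line = await stream.readline()
        if not line:
            break
        text = line.decode("utf-8").rstrip()
        sink.append(text)
        print(label, text, file=out_file)

async def run(cmd):
    proc = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
    )
    out, err = [], []
    await asyncio.gather(
        _tee(proc.stdout, "stdout:", sys.stdout, out),
        _tee(proc.stderr, "stderr:", sys.stderr, err),
    )
    return await proc.wait(), out, err

if __name__ == "__main__":
    code, out, err = asyncio.run(run([sys.executable, "-c", "print('hello')"]))
    assert code == 0 and out == ["hello"]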
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCAmelCase__ = TypeVar("KEY")
UpperCAmelCase__ = TypeVar("VAL")
@dataclass(frozen=A , slots=A )
class __lowerCAmelCase ( Generic[KEY, VAL] ):
UpperCamelCase = 42
UpperCamelCase = 42
class __lowerCAmelCase ( _Item ):
def __init__( self : Optional[Any]) -> None:
"""simple docstring"""
super().__init__(A , A)
def __bool__( self : Any) -> bool:
"""simple docstring"""
return False
UpperCAmelCase__ = _DeletedItem()
class __lowerCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self : List[str] , A : int = 8 , A : float = 0.7_5) -> None:
"""simple docstring"""
_UpperCAmelCase = initial_block_size
_UpperCAmelCase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCAmelCase = capacity_factor
_UpperCAmelCase = 0
def _lowerCamelCase ( self : Union[str, Any] , A : KEY) -> int:
"""simple docstring"""
return hash(A) % len(self._buckets)
def _lowerCamelCase ( self : Optional[int] , A : int) -> int:
"""simple docstring"""
return (ind + 1) % len(self._buckets)
def _lowerCamelCase ( self : int , A : int , A : KEY , A : VAL) -> bool:
"""simple docstring"""
_UpperCAmelCase = self._buckets[ind]
if not stored:
_UpperCAmelCase = _Item(A , A)
self._len += 1
return True
elif stored.key == key:
_UpperCAmelCase = _Item(A , A)
return True
else:
return False
def _lowerCamelCase ( self : Any) -> bool:
"""simple docstring"""
_UpperCAmelCase = len(self._buckets) * self._capacity_factor
return len(self) >= int(A)
def _lowerCamelCase ( self : List[str]) -> bool:
"""simple docstring"""
if len(self._buckets) <= self._initial_block_size:
return False
_UpperCAmelCase = len(self._buckets) * self._capacity_factor / 2
return len(self) < limit
def _lowerCamelCase ( self : Tuple , A : int) -> None:
"""simple docstring"""
_UpperCAmelCase = self._buckets
_UpperCAmelCase = [None] * new_size
_UpperCAmelCase = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val)
def _lowerCamelCase ( self : Any) -> None:
"""simple docstring"""
self._resize(len(self._buckets) * 2)
def _lowerCamelCase ( self : Union[str, Any]) -> None:
"""simple docstring"""
self._resize(len(self._buckets) // 2)
def _lowerCamelCase ( self : Union[str, Any] , A : KEY) -> Iterator[int]:
"""simple docstring"""
_UpperCAmelCase = self._get_bucket_index(A)
for _ in range(len(self._buckets)):
yield ind
_UpperCAmelCase = self._get_next_ind(A)
def _lowerCamelCase ( self : Any , A : KEY , A : VAL) -> None:
"""simple docstring"""
for ind in self._iterate_buckets(A):
if self._try_set(A , A , A):
break
def __setitem__( self : int , A : KEY , A : VAL) -> None:
"""simple docstring"""
if self._is_full():
self._size_up()
self._add_item(A , A)
def __delitem__( self : Dict , A : KEY) -> None:
"""simple docstring"""
for ind in self._iterate_buckets(A):
_UpperCAmelCase = self._buckets[ind]
if item is None:
raise KeyError(A)
if item is _deleted:
continue
if item.key == key:
_UpperCAmelCase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Any , A : KEY) -> VAL:
"""simple docstring"""
for ind in self._iterate_buckets(A):
_UpperCAmelCase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(A)
def __len__( self : int) -> int:
"""simple docstring"""
return self._len
def __iter__( self : Dict) -> Iterator[KEY]:
"""simple docstring"""
yield from (item.key for item in self._buckets if item)
def __repr__( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = ' ,'.join(
F"{item.key}: {item.val}" for item in self._buckets if item)
return F"HashMap({val_string})"
| 339 |
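The row above implements an open-addressing hash map: linear probing on collisions, a `_deleted` tombstone so probe chains survive deletions, and resizing at 75% load (up) or sparse occupancy (down). A short usage sketch, assuming the class is imported under its upstream name `HashMap` (the dump obfuscates it to `__lowerCAmelCase`):

hm = HashMap()            # 8 buckets initially, grows past 75% load
hm["apple"] = 1
hm["banana"] = 2
hm["apple"] = 3           # same slot is overwritten, no duplicate entry
assert hm["apple"] == 3 and len(hm) == 2

del hm["banana"]          # the slot becomes a tombstone rather than None,
                          # so later probe sequences still walk past it
assert "banana" not in hm and len(hm) == 1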
from __future__ import annotations
UpperCAmelCase__ = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase__ = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase__ = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def A ( _UpperCAmelCase : Matrix , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def A ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def A ( _UpperCAmelCase : Matrix ) -> Matrix | None:
'''simple docstring'''
if location := find_empty_location(_UpperCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = digit
if sudoku(_UpperCAmelCase ) is not None:
return grid
_UpperCAmelCase = 0
return None
def A ( _UpperCAmelCase : Matrix ) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
print(_UpperCAmelCase , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
UpperCAmelCase__ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 339 | 1 |
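The solver above is plain backtracking: `find_empty_location` picks the first empty cell, each digit 1-9 that passes the row/column/box check is placed tentatively, and a failed recursion resets the cell to 0. The same core with descriptive names (a sketch equivalent to the snippet, not a copy of it):

def is_safe(grid, row, col, n):
    # n must not already appear in the row, the column, or the 3x3 box.
    if any(grid[row][i] == n or grid[i][col] == n for i in range(9)):
        return False
    r0, c0 = row - row % 3, col - col % 3
    return all(grid[r0 + i][c0 + j] != n for i in range(3) for j in range(3))

def solve(grid):
    empty = next(((r, c) for r in range(9) for c in range(9) if grid[r][c] == 0), None)
    if empty is None:
        return True                        # no empty cell left: solved
    row, col = empty
    for digit in range(1, 10):
        if is_safe(grid, row, col, digit):
            grid[row][col] = digit         # tentative placement
            if solve(grid):
                return True
            grid[row][col] = 0             # undo and try the next digit
    return False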
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''data2vec-audio'''
def __init__( self : List[Any] , A : Union[str, Any]=32 , A : List[Any]=7_68 , A : Optional[Any]=12 , A : int=12 , A : int=30_72 , A : Optional[int]="gelu" , A : int=0.1 , A : List[str]=0.1 , A : Optional[Any]=0.1 , A : List[Any]=0.0 , A : Any=0.1 , A : Optional[int]=0.1 , A : List[Any]=0.0_2 , A : Tuple=1E-5 , A : Union[str, Any]="gelu" , A : List[str]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , A : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , A : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , A : List[Any]=False , A : List[Any]=16 , A : List[Any]=19 , A : Dict=5 , A : str=0.0_5 , A : int=10 , A : List[str]=2 , A : int=0.0 , A : List[Any]=10 , A : Optional[Any]=0 , A : Optional[Any]="sum" , A : Any=False , A : Union[str, Any]=False , A : int=2_56 , A : Optional[Any]=(5_12, 5_12, 5_12, 5_12, 15_00) , A : List[str]=(5, 3, 3, 1, 1) , A : List[Any]=(1, 2, 3, 1, 1) , A : List[Any]=5_12 , A : int=0 , A : int=1 , A : Optional[Any]=2 , A : str=False , A : int=3 , A : Optional[int]=2 , A : Optional[Any]=3 , A : Optional[int]=None , **A : Optional[int] , ) -> Tuple:
"""simple docstring"""
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A)
_UpperCAmelCase = hidden_size
_UpperCAmelCase = feat_extract_activation
_UpperCAmelCase = list(A)
_UpperCAmelCase = list(A)
_UpperCAmelCase = list(A)
_UpperCAmelCase = conv_bias
_UpperCAmelCase = num_conv_pos_embeddings
_UpperCAmelCase = num_conv_pos_embedding_groups
_UpperCAmelCase = conv_pos_kernel_size
_UpperCAmelCase = len(self.conv_dim)
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = feat_proj_dropout
_UpperCAmelCase = final_dropout
_UpperCAmelCase = layerdrop
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = vocab_size
_UpperCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase = mask_time_prob
_UpperCAmelCase = mask_time_length
_UpperCAmelCase = mask_time_min_masks
_UpperCAmelCase = mask_feature_prob
_UpperCAmelCase = mask_feature_length
_UpperCAmelCase = mask_feature_min_masks
# ctc loss
_UpperCAmelCase = ctc_loss_reduction
_UpperCAmelCase = ctc_zero_infinity
# adapter
_UpperCAmelCase = add_adapter
_UpperCAmelCase = adapter_kernel_size
_UpperCAmelCase = adapter_stride
_UpperCAmelCase = num_adapter_layers
_UpperCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCAmelCase = list(A)
_UpperCAmelCase = list(A)
_UpperCAmelCase = list(A)
_UpperCAmelCase = xvector_output_dim
@property
def _lowerCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
return math.prod(self.conv_stride)
| 339 |
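The row above is the Data2VecAudio `PretrainedConfig` subclass; its final property (named `inputs_to_logits_ratio` upstream, obfuscated to `_lowerCamelCase` here) multiplies the convolutional strides to give the number of raw audio samples per encoder frame. A usage sketch, assuming `transformers` is installed:

from transformers import Data2VecAudioConfig, Data2VecAudioModel

config = Data2VecAudioConfig()              # the defaults shown above
model = Data2VecAudioModel(config)          # randomly initialized from the config
print(config.inputs_to_logits_ratio)        # 320 == 5 * 2 * 2 * 2 * 2 * 2 * 2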
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
UpperCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def _lowerCamelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]:
"""simple docstring"""
import nltk
nltk.download('wordnet')
if NLTK_VERSION >= version.Version('3.6.5'):
nltk.download('punkt')
if NLTK_VERSION >= version.Version('3.6.6'):
nltk.download('omw-1.4')
def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any:
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5'):
_UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A)
for ref, pred in zip(A , A)
]
else:
_UpperCAmelCase = [
meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A)
for ref, pred in zip(A , A)
]
return {"meteor": np.mean(A)}
| 339 | 1 |
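The metric above defers to NLTK's `meteor_score`; the version checks only decide whether inputs must be pre-tokenized and which corpora to download. The equivalent direct call (a sketch assuming a recent `nltk`; the downloads run once):

import nltk
from nltk import word_tokenize
from nltk.translate.meteor_score import single_meteor_score

nltk.download("wordnet")   # lexical resources used by METEOR
nltk.download("punkt")     # tokenizer model for word_tokenize

reference = "It is a guide to action that ensures that the military will forever heed Party commands"
prediction = "It is a guide to action which ensures that the military always obeys the commands of the party"

# NLTK >= 3.6.5 expects pre-tokenized inputs, hence word_tokenize.
score = single_meteor_score(word_tokenize(reference), word_tokenize(prediction),
                            alpha=0.9, beta=3, gamma=0.5)
print(round(score, 4))     # ~0.6944, matching the docstring example above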
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=False ) -> str:
'''simple docstring'''
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
UpperCAmelCase__ = parse_flag_from_env("RUN_SLOW", default=False)
def A ( _UpperCAmelCase : List[str] ) -> List[str]:
'''simple docstring'''
return unittest.skip('Test was skipped' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> str:
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : str ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Tuple ) -> int:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Tuple ) -> Any:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None ) -> Dict:
'''simple docstring'''
if test_case is None:
return partial(_UpperCAmelCase , version=_UpperCAmelCase )
return unittest.skipUnless(is_torch_version('>=' , _UpperCAmelCase ) , F"test requires torch version >= {version}" )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> int:
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCAmelCase )
UpperCAmelCase__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A ( _UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCAmelCase )
class __lowerCAmelCase ( unittest.TestCase ):
UpperCamelCase = True
@classmethod
def _lowerCamelCase ( cls : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = tempfile.mkdtemp()
@classmethod
def _lowerCamelCase ( cls : Union[str, Any]) -> str:
"""simple docstring"""
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def _lowerCamelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir).glob('**/*'):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(A)
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[int] , A : Union[mock.Mock, List[mock.Mock]]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = mocks if isinstance(A , (tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = AcceleratorState()
_UpperCAmelCase = tensor[None].clone().to(state.device )
_UpperCAmelCase = gather(_UpperCAmelCase ).cpu()
_UpperCAmelCase = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _UpperCAmelCase ):
return False
return True
class __lowerCAmelCase :
def __init__( self : Optional[Any] , A : Union[str, Any] , A : Optional[int] , A : str) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def A ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_UpperCAmelCase )
else:
break
async def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Union[str, Any]=False ) -> _RunOutput:
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(_UpperCAmelCase ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str="" ):
_UpperCAmelCase = line.decode('utf-8' ).rstrip()
sink.append(_UpperCAmelCase )
if not quiet:
print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _UpperCAmelCase : tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:' ) ) ),
] , timeout=_UpperCAmelCase , )
return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Dict=None , _UpperCAmelCase : str=None , _UpperCAmelCase : str=180 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : List[Any]=True ) -> _RunOutput:
'''simple docstring'''
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase ) )
_UpperCAmelCase = ' '.join(_UpperCAmelCase )
if result.returncode > 0:
_UpperCAmelCase = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class __lowerCAmelCase ( A ):
pass
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=False ) -> Tuple:
'''simple docstring'''
try:
_UpperCAmelCase = subprocess.check_output(_UpperCAmelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_UpperCAmelCase , 'decode' ):
_UpperCAmelCase = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(_UpperCAmelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 339 |
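The row above is accelerate's full testing module: `parse_flag_from_env` gates slow tests behind `RUN_SLOW`, and the family of functions (all collapsed to `A` by the obfuscation) are skip decorators such as `slow`, `require_cuda`, and `require_tpu`. A sketch of how they are typically applied, assuming the upstream import path:

import unittest
from accelerate.test_utils import require_cuda, slow  # upstream names for the `A` helpers above

class MyIntegrationTests(unittest.TestCase):
    @slow                   # skipped unless RUN_SLOW=yes is exported
    def test_full_training_run(self):
        ...

    @require_cuda           # skipped on machines without a GPU
    def test_fp16_forward(self):
        ...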
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
UpperCAmelCase__ = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def A ( _UpperCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
_UpperCAmelCase = ['layers', 'blocks']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def A ( _UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = list(s_dict.keys() )
for key in keys:
_UpperCAmelCase = key
for k, v in WHISPER_MAPPING.items():
if k in key:
_UpperCAmelCase = new_key.replace(_UpperCAmelCase , _UpperCAmelCase )
print(F"{key} -> {new_key}" )
_UpperCAmelCase = s_dict.pop(_UpperCAmelCase )
return s_dict
def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = emb.weight.shape
_UpperCAmelCase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
_UpperCAmelCase = emb.weight.data
return lin_layer
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> bytes:
'''simple docstring'''
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
_UpperCAmelCase = os.path.basename(_UpperCAmelCase )
_UpperCAmelCase = url.split('/' )[-2]
_UpperCAmelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if os.path.exists(_UpperCAmelCase ) and not os.path.isfile(_UpperCAmelCase ):
raise RuntimeError(F"{download_target} exists and is not a regular file" )
if os.path.isfile(_UpperCAmelCase ):
_UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read()
        if hashlib.sha256(_UpperCAmelCase ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
with urllib.request.urlopen(_UpperCAmelCase ) as source, open(_UpperCAmelCase , 'wb' ) as output:
with tqdm(
total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=_UpperCAmelCase , unit_divisor=1_024 ) as loop:
while True:
_UpperCAmelCase = source.read(8_192 )
if not buffer:
break
output.write(_UpperCAmelCase )
loop.update(len(_UpperCAmelCase ) )
_UpperCAmelCase = open(_UpperCAmelCase , 'rb' ).read()
    if hashlib.sha256(_UpperCAmelCase ).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
return model_bytes
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
if ".pt" not in checkpoint_path:
_UpperCAmelCase = _download(_MODELS[checkpoint_path] )
else:
_UpperCAmelCase = torch.load(_UpperCAmelCase , map_location='cpu' )
_UpperCAmelCase = original_checkpoint['dims']
_UpperCAmelCase = original_checkpoint['model_state_dict']
_UpperCAmelCase = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(_UpperCAmelCase )
rename_keys(_UpperCAmelCase )
_UpperCAmelCase = True
_UpperCAmelCase = state_dict['decoder.layers.0.fc1.weight'].shape[0]
_UpperCAmelCase = WhisperConfig(
        vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=_UpperCAmelCase , decoder_ffn_dim=_UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , )
_UpperCAmelCase = WhisperForConditionalGeneration(_UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0 and not set(_UpperCAmelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F" but all the following weights are missing {missing}" )
if tie_embeds:
_UpperCAmelCase = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_UpperCAmelCase = proj_out_weights
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 339 | 1 |
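The conversion script above verifies each downloaded checkpoint against a SHA-256 digest embedded in the URL (its second-to-last path segment) and re-downloads on mismatch. A minimal stdlib-only sketch of that verify-or-fetch pattern (function and variable names are illustrative):

import hashlib
import os
import urllib.request

def fetch_verified(url: str, dest: str) -> bytes:
    expected = url.split("/")[-2]            # digest is part of the URL path
    if os.path.isfile(dest):
        data = open(dest, "rb").read()
        if hashlib.sha256(data).hexdigest() == expected:
            return data                      # cached copy is intact
    data = urllib.request.urlopen(url).read()
    if hashlib.sha256(data).hexdigest() != expected:
        raise RuntimeError("checksum mismatch after download")
    with open(dest, "wb") as f:
        f.write(data)
    return data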
import argparse
from collections import defaultdict
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : int , _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(_UpperCAmelCase , 'r' ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = F"class {class_name}("
_UpperCAmelCase = F"{4 * ' '}def {test_name}("
_UpperCAmelCase = F"{8 * ' '}{correct_line.split()[0]}"
_UpperCAmelCase = F"{16 * ' '}{correct_line.split()[0]}"
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = []
for line in lines:
if line.startswith(_UpperCAmelCase ):
_UpperCAmelCase = True
elif in_class and line.startswith(_UpperCAmelCase ):
_UpperCAmelCase = True
elif in_class and in_func and (line.startswith(_UpperCAmelCase ) or line.startswith(_UpperCAmelCase )):
_UpperCAmelCase = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_UpperCAmelCase = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_UpperCAmelCase = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
_UpperCAmelCase = _UpperCAmelCase = _UpperCAmelCase = _UpperCAmelCase = False
else:
new_lines.append(_UpperCAmelCase )
with open(_UpperCAmelCase , 'w' ) as f:
for line in new_lines:
f.write(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Any=None ) -> Optional[Any]:
'''simple docstring'''
if fail is not None:
with open(_UpperCAmelCase , 'r' ) as f:
_UpperCAmelCase = {l.strip() for l in f.readlines()}
else:
_UpperCAmelCase = None
with open(_UpperCAmelCase , 'r' ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = defaultdict(_UpperCAmelCase )
for line in correct_lines:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
UpperCAmelCase__ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 339 |
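The script above reads one semicolon-separated record per line from `--correct_filename` and uses a `defaultdict(int)` so the N-th record for the same test rewrites the N-th matching line. A hypothetical record showing the expected format (the path and names are made up):

from collections import defaultdict

# One record per line: <file>;<class>;<test>;<replacement line>
record = "tests/test_math.py;MathTest;test_add;self.assertEqual(add(2, 2), 4)"
path, class_name, test_name, correct_line = record.split(";")

done_test = defaultdict(int)
done_test[f"{path}_{class_name}_{test_name}"] += 1   # counts repeats per test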
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase__ = datasets.utils.logging.get_logger(__name__)
class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilderConfig ):
UpperCamelCase = None
UpperCamelCase = None
class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilder ):
UpperCamelCase = datasets.Audio()
UpperCamelCase = '''audio'''
UpperCamelCase = AudioFolderConfig
UpperCamelCase = 42 # definition at the bottom of the script
UpperCamelCase = AudioClassification(audio_column='''audio''' , label_column='''label''' )
UpperCAmelCase__ = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
UpperCAmelCase__ = AUDIO_EXTENSIONS
| 339 | 1 |
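The row above defines the AudioFolder builder behind `load_dataset("audiofolder", ...)`: it scans a directory for files with the listed extensions and infers a `label` column from the folder names. A usage sketch, assuming a local directory with one subfolder per class:

from datasets import load_dataset

# my_audio/
#   cat/  a.wav  b.wav
#   dog/  c.wav
ds = load_dataset("audiofolder", data_dir="my_audio")
print(ds["train"][0]["audio"])   # decoded audio: path, array, sampling_rate
print(ds["train"][0]["label"])   # class index inferred from the folder name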