| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81 to 54k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase(ModelMixin, ConfigMixin):
    """Projects CLIP embeddings into additive time embeddings and extra context tokens (unCLIP-style)."""

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim: int,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
| 669 |
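To make the repaired projection module above easier to check, here is a minimal smoke test. The dimension values are illustrative assumptions (not taken from any released checkpoint), and it assumes the relative diffusers-style imports above resolve, i.e. the class lives inside the diffusers package:

proj = _lowerCAmelCase(
    clip_extra_context_tokens=4,
    clip_embeddings_dim=768,
    time_embed_dim=1536,       # assumed value, for illustration only
    cross_attention_dim=2048,  # assumed value, for illustration only
)
hidden_states, time_embeds = proj(
    image_embeddings=torch.randn(2, 768),
    prompt_embeds=torch.randn(2, 768),
    text_encoder_hidden_states=torch.randn(2, 77, 768),
    do_classifier_free_guidance=False,
)
# 4 extra context tokens are prepended to the 77 projected text tokens:
# hidden_states.shape == (2, 81, 2048); time_embeds.shape == (2, 1536)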
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen consecutive digits in the 1000-digit number N."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 1 |
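A straightforward cross-check of the reduce-based solution above (added for illustration; the helper name is ours, not part of the original file):

def solution_bruteforce(n: str = N, window: int = 13) -> int:
    best = 0
    for i in range(len(n) - window + 1):
        product = 1
        for digit in n[i : i + window]:
            product *= int(digit)
        best = max(best, product)
    return best

assert solution_bruteforce() == solution()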
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase(ProcessorMixin):
    """Wraps a BridgeTower image processor and a Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 669 |
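A hedged usage sketch for the processor above. The checkpoint id is an assumption for illustration (it requires network access and PIL); the upstream class equivalent to the one above is BridgeTowerProcessor:

from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")  # assumed checkpoint id
inputs = processor(images=Image.new("RGB", (288, 288)), text="an example caption", return_tensors="pt")
# inputs now holds input_ids / attention_mask from the tokenizer plus pixel_values / pixel_mask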
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class _lowerCAmelCase(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 669 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 669 |
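The block above follows the lazy-module pattern: the dict maps submodule names to their exported symbols, and the real import is deferred until an attribute is first accessed. A minimal standalone sketch of the same idea (an illustration, not transformers' actual _LazyModule implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ only runs once per symbol
        return value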
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 669 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpt2 import TFGPT2Tokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
| 669 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def test_download_manager_download(monkeypatch, urls_type, tmp_path):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir), use_etag=True,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir, use_etag=True,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 669 | 1 |
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 |
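Worth noting as added context for the dynamic programme above: for a row of length 5 the per-size counts are hand-checkable — 7 placements of length-2 tiles, 3 of length-3 tiles, and 2 of length-4 tiles — so the function should satisfy:

assert solution(5) == 12  # 7 + 3 + 2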
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 669 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase(PreTrainedTokenizerFast):
    """Fast REALM tokenizer backed by the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 669 |
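A hedged usage sketch for batch_encode_candidates above. The checkpoint id and the shape comment are assumptions for illustration (the class keeps its obfuscated name _lowerCAmelCase from the row above); each inner list is one question's candidate set, padded to max_length:

tokenizer = _lowerCAmelCase.from_pretrained("google/realm-cc-news-pretrained-encoder")  # assumed checkpoint
batch = tokenizer.batch_encode_candidates(
    [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt"
)
# batch["input_ids"].shape == (num_questions, num_candidates, max_length) == (1, 2, 10)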
from random import randint, random
def construct_highway(number_of_cells: int, frequency: int, initial_speed: int, random_frequency: bool = False, random_speed: bool = False, max_speed: int = 5) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
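An illustrative run of the cellular-automaton traffic model above (parameter values are arbitrary choices for demonstration):

# 1 lane of 30 cells, a car every 5 cells starting at speed 2,
# evolved for 10 steps with a 10% chance of random braking.
history = simulate(construct_highway(30, 5, 2), 10, 0.1, 5)
for state in history:
    print("".join("." if cell == -1 else str(cell) for cell in state))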
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCAmelCase_ = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCAmelCase_ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 669 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")
        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f"""=> File names {file_names}""")
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"""=> removing {file_name}""")
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 1 |
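For reference, a programmatic invocation sketch of the converter above (the argument values are assumed examples; any key of TOKENIZER_CLASSES and any of its known checkpoints would do):

convert_slow_checkpoint_to_fast(
    tokenizer_name="BertTokenizer",       # assumed example; must be a key of TOKENIZER_CLASSES
    checkpoint_name="bert-base-uncased",  # assumed example checkpoint
    dump_path="./fast_tokenizers",
    force_download=False,
)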
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
lowerCAmelCase_ = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 4_8000,
"""sample_size""": 6_5536,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 4_8000,
"""sample_size""": 6_5536,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 4_8000,
"""sample_size""": 13_1072,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 1_6000,
"""sample_size""": 6_5536,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 1_6000,
"""sample_size""": 6_5536,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 1_6000,
"""sample_size""": 6_5536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    # Convert the (alpha, sigma) noise-schedule pair into a continuous timestep.
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"""wget {url} ./""")
    return f"""./{model_name}.ckpt"""
lowerCAmelCase_ = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
lowerCAmelCase_ = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
lowerCAmelCase_ = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
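# original layer index -> diffusers submodule name at depth 0 (referenced below as DEPTH_0_TO_LAYER)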
lowerCAmelCase_ = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
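# submodule renames inside a ResConvBlock (referenced below as RES_CONV_MAP)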
lowerCAmelCase_ = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
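# attention submodule renames; the fused qkv projection splits into separate query/key/value weights (referenced below as ATTN_MAP)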
lowerCAmelCase_ = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] )-> int:
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(F"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> Union[str, Any]:
for key, value in ATTN_MAP.items():
if name.startswith(lowerCAmelCase ) and not isinstance(lowerCAmelCase , lowerCAmelCase ):
return name.replace(lowerCAmelCase , lowerCAmelCase )
elif name.startswith(lowerCAmelCase ):
return [name.replace(lowerCAmelCase , lowerCAmelCase ) for v in value]
raise ValueError(F"""Attn error with {name}""" )
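# translate one key of the original state dict into the diffusers naming scheme; called below as rename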
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: str=13 )-> Any:
_snake_case : Optional[Any] = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
_snake_case : Optional[int] = 0
if string.startswith('net.3.' ):
depth += 1
_snake_case : Union[str, Any] = string[6:]
elif string.startswith('net.' ):
_snake_case : Dict = string[4:]
while string.startswith('main.7.' ):
depth += 1
_snake_case : Optional[Any] = string[7:]
if string.startswith('main.' ):
_snake_case : Tuple = string[5:]
# mid block
if string[:2].isdigit():
_snake_case : Optional[Any] = string[:2]
_snake_case : Union[str, Any] = string[2:]
else:
_snake_case : str = string[0]
_snake_case : Any = string[1:]
if depth == max_depth:
_snake_case : int = MID_NUM_TO_LAYER[layer_num]
_snake_case : int = 'mid_block'
elif depth > 0 and int(lowerCAmelCase ) < 7:
_snake_case : Dict = DOWN_NUM_TO_LAYER[layer_num]
_snake_case : Any = F"""down_blocks.{depth}"""
elif depth > 0 and int(lowerCAmelCase ) > 7:
_snake_case : Optional[Any] = UP_NUM_TO_LAYER[layer_num]
_snake_case : Union[str, Any] = F"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
_snake_case : Optional[int] = DEPTH_0_TO_LAYER[layer_num]
_snake_case : Union[str, Any] = F"""up_blocks.{max_depth - 1}""" if int(lowerCAmelCase ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" )
_snake_case : Union[str, Any] = string_left[1:]
if "resnets" in new_layer:
_snake_case : str = convert_resconv_naming(lowerCAmelCase )
elif "attentions" in new_layer:
_snake_case : Tuple = convert_attn_naming(lowerCAmelCase )
_snake_case : List[str] = new_string_left
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Union[str, Any] = prefix + '.' + new_layer + '.' + string_left
else:
_snake_case : Any = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
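# rename every key of the original state dict, skipping up-/downsample kernels; called below as rename_orig_weights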
def lowerCamelCase_ ( lowerCAmelCase: Tuple )-> List[str]:
_snake_case : Tuple = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
            # up- and downsample layers don't have trainable weights
continue
_snake_case : int = rename(lowerCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = transform_conv_attns(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
_snake_case : Tuple = v
return new_state_dict
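# turn 1x1-conv attention weights into linear-layer weights, splitting fused qkv tensors into three equal chunks; called above as transform_conv_attns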
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[Any] )-> Dict:
if len(lowerCAmelCase ) == 1:
if len(v.shape ) == 3:
# weight
_snake_case : Optional[int] = v[:, :, 0]
else:
# bias
_snake_case : Dict = v
else:
# qkv matrices
_snake_case : Tuple = v.shape[0]
_snake_case : int = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
_snake_case : str = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
_snake_case : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def lowerCamelCase_ ( lowerCAmelCase: Any )-> List[str]:
_snake_case : Tuple = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
_snake_case : int = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
_snake_case : Union[str, Any] = download(lowerCAmelCase )
_snake_case : Union[str, Any] = MODELS_MAP[model_name]['sample_rate']
_snake_case : str = MODELS_MAP[model_name]['sample_size']
_snake_case : List[str] = Object()
_snake_case : Tuple = sample_size
_snake_case : List[str] = sample_rate
_snake_case : Any = 0
_snake_case : Any = UNetaDModel(sample_size=lowerCAmelCase , sample_rate=lowerCAmelCase )
_snake_case : Tuple = diffusers_model.state_dict()
_snake_case : Optional[int] = DiffusionUncond(lowerCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCAmelCase )['state_dict'] )
_snake_case : Any = orig_model.diffusion_ema.eval()
_snake_case : Tuple = orig_model.state_dict()
_snake_case : Tuple = rename_orig_weights(lowerCAmelCase )
_snake_case : List[str] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
_snake_case : List[str] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCAmelCase ) == 0, F"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith('kernel' ) for k in list(lowerCAmelCase ) ), F"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
_snake_case : Optional[int] = value.squeeze()
_snake_case : str = value
diffusers_model.load_state_dict(lowerCAmelCase )
_snake_case : Dict = 1_00
_snake_case : Optional[Any] = 33
_snake_case : Optional[Any] = IPNDMScheduler(num_train_timesteps=lowerCAmelCase )
_snake_case : Dict = torch.manual_seed(lowerCAmelCase )
_snake_case : Tuple = torch.randn([1, 2, config.sample_size] , generator=lowerCAmelCase ).to(lowerCAmelCase )
_snake_case : List[Any] = torch.linspace(1 , 0 , steps + 1 , device=lowerCAmelCase )[:-1]
_snake_case : str = get_crash_schedule(lowerCAmelCase )
_snake_case : List[Any] = DanceDiffusionPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
_snake_case : int = torch.manual_seed(33 )
_snake_case : Tuple = pipe(num_inference_steps=lowerCAmelCase , generator=lowerCAmelCase ).audios
_snake_case : str = sampling.iplms_sample(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , {} )
_snake_case : Dict = generated.clamp(-1 , 1 )
_snake_case : Optional[int] = (generated - audio).abs().sum()
_snake_case : List[Any] = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , lowerCAmelCase )
print('Diff max' , lowerCAmelCase )
assert diff_max < 1E-3, F"""Diff max: {diff_max} is too much :-/"""
print(F"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowerCAmelCase_ = parser.parse_args()
main(args)
| 669 |
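# Base16 (hex) encode: emit two uppercase hex digits per input byte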
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return "".join([hex(lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(lowerCAmelCase )] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCAmelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
from torch import nn
class _lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict ):
'''simple docstring'''
super().__init__()
_snake_case : str = class_size
_snake_case : Any = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
_snake_case : Any = nn.Linear(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : Any = self.mlp(UpperCamelCase )
return logits
| 669 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
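# note: the standard Twitter API only exposes roughly a user's 3200 most recent tweets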
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 1 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase_ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase_ = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
lowerCAmelCase_ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
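# return the first checkpoint whose docstring link is exactly https://huggingface.co/<checkpoint_name>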
def lowerCamelCase_ ( lowerCAmelCase: int )-> List[Any]:
_snake_case : Dict = None
# source code of `config_class`
_snake_case : List[Any] = inspect.getsource(lowerCAmelCase )
_snake_case : List[Any] = _re_checkpoint.findall(lowerCAmelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/' ):
_snake_case : Optional[int] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
_snake_case : Dict = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
_snake_case : Any = ckpt_name
break
return checkpoint
def lowerCamelCase_ ( )-> int:
_snake_case : List[Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
_snake_case : List[Any] = get_checkpoint_from_config_class(lowerCAmelCase )
_snake_case : int = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
_snake_case : List[str] = '\n'.join(sorted(lowerCAmelCase ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 669 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
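        # return a fresh instance carrying deep copies of every field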
return self.__class__(**{k: copy.deepcopy(UpperCamelCase ) for k, v in self.__dict__.items()} )
| 669 | 1 |
from __future__ import annotations
lowerCAmelCase_ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
lowerCAmelCase_ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowerCamelCase_ ( lowerCAmelCase: list[float] )-> list[float]:
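    # brute force, O(n^2): for each element, scan right for the first strictly larger value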
_snake_case : Tuple = []
_snake_case : Any = len(lowerCAmelCase )
for i in range(lowerCAmelCase ):
_snake_case : float = -1
for j in range(i + 1 , lowerCAmelCase ):
if arr[i] < arr[j]:
_snake_case : str = arr[j]
break
result.append(lowerCAmelCase )
return result
def lowerCamelCase_ ( lowerCAmelCase: list[float] )-> list[float]:
_snake_case : int = []
for i, outer in enumerate(lowerCAmelCase ):
_snake_case : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_snake_case : List[str] = inner
break
result.append(lowerCAmelCase )
return result
def lowerCamelCase_ ( lowerCAmelCase: list[float] )-> list[float]:
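    # monotonic stack, O(n): scan right to left, popping stack values <= the current element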
_snake_case : List[Any] = len(lowerCAmelCase )
_snake_case : list[float] = []
_snake_case : list[float] = [-1] * arr_size
for index in reversed(range(lowerCAmelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_snake_case : Any = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCAmelCase_ = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 669 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = tokenizer
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase )
_snake_case : int = TFGPTaLMHeadModel.from_config(UpperCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer(UpperCamelCase )
_snake_case : Union[str, Any] = tokenized['input_ids'].to_tensor()
_snake_case : Any = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_snake_case : Tuple = self.model(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )['logits']
return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
_snake_case : Optional[int] = [GPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_snake_case : Tuple = [TFGPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_snake_case : Any = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_snake_case : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_snake_case : Optional[int] = tokenizer([test_inputs] , return_tensors='tf' )
_snake_case : Tuple = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_snake_case : Dict = python_outputs[key].numpy()
_snake_case : Optional[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : str = tf.function(UpperCamelCase )
for test_inputs in self.test_sentences:
_snake_case : int = tf.constant(UpperCamelCase )
_snake_case : Tuple = compiled_tokenizer(UpperCamelCase )
_snake_case : int = tf_tokenizer(UpperCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Union[str, Any] = ModelToSave(tokenizer=UpperCamelCase )
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Tuple = model.serving(UpperCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_snake_case : str = Path(UpperCamelCase ) / 'saved.model'
tf.saved_model.save(UpperCamelCase , UpperCamelCase , signatures={'serving_default': model.serving} )
_snake_case : Optional[int] = tf.saved_model.load(UpperCamelCase )
_snake_case : List[str] = loaded_model.signatures['serving_default'](UpperCamelCase )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Any = tf_tokenizer(UpperCamelCase ) # Build model with some sample inputs
_snake_case : Optional[Any] = tf_tokenizer.get_config()
_snake_case : Tuple = TFGPTaTokenizer.from_config(UpperCamelCase )
_snake_case : Optional[Any] = model_from_config(UpperCamelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_snake_case : Union[str, Any] = 12_31_23
for max_length in [3, 5, 10_24]:
_snake_case : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : List[str] = tf_tokenizer(UpperCamelCase , max_length=UpperCamelCase )
_snake_case : int = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 669 | 1 |
import unittest
from knapsack import greedy_knapsack as kp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : str = [10, 20, 30, 40, 50, 60]
_snake_case : str = [2, 4, 6, 8, 10, 12]
_snake_case : List[str] = 1_00
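        # total weight 2+4+6+8+10+12 = 42 fits within max_weight 100, so every item is taken: profit 210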
self.assertEqual(kp.calc_profit(UpperCamelCase , UpperCamelCase , UpperCamelCase ) , 2_10 )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'Weight can not be negative.' )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'Profit can not be negative.' )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
        self.assertRaisesRegex(
            ValueError , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> list:
_snake_case : List[Any] = int(lowerCAmelCase )
if n_element < 1:
        raise ValueError('a should be a positive number' )
_snake_case : Union[str, Any] = [1]
_snake_case , _snake_case , _snake_case : Any = (0, 0, 0)
_snake_case : str = 1
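    # i, j, k track the next candidates to multiply by 2, 3 and 5 respectively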
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase_ = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase_ = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669 | 1 |
import numpy as np
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: np.ndarray , lowerCAmelCase: int , lowerCAmelCase: int )-> np.ndarray:
_snake_case : Optional[Any] = np.array(lowerCAmelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
_snake_case : Tuple = 0
_snake_case : str = 0
_snake_case : int = 0
_snake_case : Optional[Any] = 0
# compute the shape of the output matrix
_snake_case : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
_snake_case : List[Any] = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
_snake_case : int = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
_snake_case : Optional[Any] = 0
_snake_case : Dict = 0
return updated_arr
def lowerCamelCase_ ( lowerCAmelCase: np.ndarray , lowerCAmelCase: int , lowerCAmelCase: int )-> np.ndarray:
_snake_case : Dict = np.array(lowerCAmelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
_snake_case : Union[str, Any] = 0
_snake_case : Any = 0
_snake_case : List[Any] = 0
_snake_case : Dict = 0
# compute the shape of the output matrix
_snake_case : Any = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
_snake_case : List[Any] = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
_snake_case : Optional[Any] = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
_snake_case : Optional[int] = 0
_snake_case : Union[str, Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
lowerCAmelCase_ = Image.open("""path_to_image""")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 669 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple="shi-labs/oneformer_demo" )-> Any:
with open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
_snake_case : str = json.load(lowerCAmelCase )
_snake_case : List[str] = {}
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
for key, info in class_info.items():
_snake_case : Optional[int] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase ) )
_snake_case : List[str] = thing_ids
_snake_case : Optional[Any] = class_names
return metadata
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
            # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = np.zeros((20, 50) )
_snake_case : int = 1
_snake_case : int = 1
_snake_case : Optional[Any] = 1
_snake_case : List[Any] = binary_mask_to_rle(UpperCamelCase )
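        # the RLE alternates run offsets and run lengths, so 4 entries encode 2 runs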
self.assertEqual(len(UpperCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
        _snake_case : Any = feature_extractor.post_process_semantic_segmentation(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_snake_case : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _snake_case : Union[str, Any] = feature_extractor.post_process_semantic_segmentation(UpperCamelCase , target_sizes=UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 669 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Any=2 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Dict=99 , UpperCamelCase : List[Any]=36 , UpperCamelCase : Any=2 , UpperCamelCase : str=4 , UpperCamelCase : List[str]=37 , UpperCamelCase : List[str]="gelu" , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : List[Any]=5_12 , UpperCamelCase : int=16 , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Optional[int]=6 , UpperCamelCase : Dict=6 , UpperCamelCase : Optional[int]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Optional[int]=10_00 , ):
'''simple docstring'''
_snake_case : Union[str, Any] = parent
_snake_case : Any = batch_size
_snake_case : Union[str, Any] = num_channels
_snake_case : str = image_size
_snake_case : str = patch_size
_snake_case : List[str] = is_training
_snake_case : Optional[int] = use_input_mask
_snake_case : Any = use_token_type_ids
_snake_case : Optional[int] = use_labels
_snake_case : Union[str, Any] = vocab_size
_snake_case : Optional[int] = hidden_size
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : Optional[int] = num_attention_heads
_snake_case : Tuple = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : str = attention_probs_dropout_prob
_snake_case : List[Any] = max_position_embeddings
_snake_case : List[str] = type_vocab_size
_snake_case : Optional[Any] = type_sequence_label_size
_snake_case : Tuple = initializer_range
_snake_case : int = coordinate_size
_snake_case : List[str] = shape_size
_snake_case : List[str] = num_labels
_snake_case : List[Any] = num_choices
_snake_case : List[str] = scope
_snake_case : str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_snake_case : str = text_seq_length
_snake_case : Optional[int] = (image_size // patch_size) ** 2 + 1
_snake_case : Union[str, Any] = self.text_seq_length + self.image_seq_length
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_snake_case : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
_snake_case : Dict = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_snake_case : Tuple = bbox[i, j, 3]
_snake_case : List[Any] = bbox[i, j, 1]
_snake_case : Tuple = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_snake_case : Dict = bbox[i, j, 2]
_snake_case : str = bbox[i, j, 0]
_snake_case : Tuple = tmp_coordinate
_snake_case : Optional[Any] = tf.constant(UpperCamelCase )
_snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : str = None
if self.use_input_mask:
_snake_case : List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
_snake_case : List[str] = None
if self.use_token_type_ids:
_snake_case : int = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_snake_case : Optional[int] = None
_snake_case : List[str] = None
if self.use_labels:
_snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_snake_case : Optional[Any] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : str , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : Any = TFLayoutLMvaModel(config=UpperCamelCase )
# text + image
_snake_case : Tuple = model(UpperCamelCase , pixel_values=UpperCamelCase , training=UpperCamelCase )
_snake_case : str = model(
UpperCamelCase , bbox=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , training=UpperCamelCase , )
_snake_case : Any = model(UpperCamelCase , bbox=UpperCamelCase , pixel_values=UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_snake_case : Optional[int] = model(UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_snake_case : List[str] = model({'pixel_values': pixel_values} , training=UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : int = self.num_labels
_snake_case : Any = TFLayoutLMvaForSequenceClassification(config=UpperCamelCase )
_snake_case : Tuple = model(
UpperCamelCase , bbox=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , training=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.num_labels
_snake_case : Optional[int] = TFLayoutLMvaForTokenClassification(config=UpperCamelCase )
_snake_case : str = model(
UpperCamelCase , bbox=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , training=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase_ ( self : int , UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = 2
_snake_case : Optional[Any] = TFLayoutLMvaForQuestionAnswering(config=UpperCamelCase )
_snake_case : Any = model(
UpperCamelCase , bbox=UpperCamelCase , pixel_values=UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , training=UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : List[str] = self.prepare_config_and_inputs()
((_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case)) : Optional[Any] = config_and_inputs
_snake_case : List[str] = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Any =(
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
a_ : Tuple =(
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
a_ : int =False
a_ : Union[str, Any] =False
a_ : Optional[int] =False
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : str ):
'''simple docstring'''
return True
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Optional[Any]=False ):
'''simple docstring'''
_snake_case : List[str] = copy.deepcopy(UpperCamelCase )
if model_class in get_values(UpperCamelCase ):
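            # multiple-choice models expect every input tiled num_choices times along a new second axis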
_snake_case : Any = {
k: tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(UpperCamelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCamelCase ):
_snake_case : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCamelCase ):
_snake_case : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
_snake_case : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCamelCase ):
_snake_case : List[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCamelCase ):
_snake_case : Any = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : List[str] = TFLayoutLMvaModelTester(self )
_snake_case : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(UpperCamelCase )
if getattr(UpperCamelCase , 'hf_compute_loss' , UpperCamelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
_snake_case : str = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase , return_labels=UpperCamelCase )
_snake_case : Optional[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCamelCase )[0]
]
_snake_case : Tuple = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_snake_case : List[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase , return_labels=UpperCamelCase )
_snake_case : Optional[int] = prepared_for_class.pop('input_ids' )
_snake_case : Tuple = model(UpperCamelCase , **UpperCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
_snake_case : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase , return_labels=UpperCamelCase )
_snake_case : int = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
_snake_case : Optional[Any] = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
_snake_case : str = -1_00
_snake_case : str = tf.convert_to_tensor(UpperCamelCase )
_snake_case : List[Any] = model(UpperCamelCase , **UpperCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
_snake_case : Tuple = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase , return_labels=UpperCamelCase )
_snake_case : Any = model(UpperCamelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
_snake_case : List[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase , return_labels=UpperCamelCase )
# Get keys that were added with the _prepare_for_class function
_snake_case : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
_snake_case : List[Any] = inspect.signature(model.call ).parameters
_snake_case : Union[str, Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
_snake_case : int = {0: 'input_ids'}
for label_key in label_keys:
_snake_case : Dict = signature_names.index(UpperCamelCase )
_snake_case : int = label_key
_snake_case : int = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
_snake_case : Optional[Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
_snake_case : int = prepared_for_class[value]
_snake_case : int = tuple(UpperCamelCase )
# Send to model
_snake_case : Tuple = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case : Union[str, Any] = type
self.model_tester.create_and_check_model(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Union[str, Any] = TFLayoutLMvaModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def lowerCamelCase_ ( )-> int:
_snake_case : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[str] = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
_snake_case : Union[str, Any] = self.default_image_processor
_snake_case : str = prepare_img()
_snake_case : int = image_processor(images=UpperCamelCase , return_tensors='tf' ).pixel_values
_snake_case : Optional[Any] = tf.constant([[1, 2]] )
_snake_case : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
_snake_case : str = model(input_ids=UpperCamelCase , bbox=UpperCamelCase , pixel_values=UpperCamelCase , training=UpperCamelCase )
# verify the logits
_snake_case : List[Any] = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase )
_snake_case : List[Any] = tf.constant(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
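# Editor's note (hedged): `bbox` above holds one (x0, y0, x1, y1) box per text token
# on LayoutLM's 0-1000 normalised grid, giving shape (1, 2, 4). The expected sequence
# length of 199 plausibly decomposes as 2 text tokens + 1 visual CLS token +
# 196 image patches (a 224x224 input with 16x16 patches).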
| 669 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase_ ( )-> Tuple:
_snake_case : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : int = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
def lowerCamelCase_ ( lowerCAmelCase: str=None )-> Any:
if subparsers is not None:
_snake_case : List[Any] = subparsers.add_parser('config' , description=lowerCAmelCase )
else:
_snake_case : Dict = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Any:
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[str] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
_snake_case : Union[str, Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase_ ( )-> Dict:
_snake_case : List[str] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
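# Editor's note (illustrative, not part of the original module): this file backs the
# `accelerate config` sub-command; `--config_file` is the only flag the parser above
# defines, e.g.:
#
#   accelerate config --config_file ./my_config.yaml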
| 669 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[int] =StableUnCLIPImgaImgPipeline
a_ : Dict =TEXT_GUIDED_IMAGE_VARIATION_PARAMS
a_ : int =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a_ : List[str] =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a_ : int =frozenset([] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Dict = 32
_snake_case : Tuple = embedder_hidden_size
# image encoding components
_snake_case : Tuple = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
_snake_case : Union[str, Any] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCamelCase , projection_dim=UpperCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
_snake_case : Any = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase )
_snake_case : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
_snake_case : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
_snake_case : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase , layers_per_block=1 , upcast_attention=UpperCamelCase , use_linear_projection=UpperCamelCase , )
torch.manual_seed(0 )
_snake_case : Union[str, Any] = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='v_prediction' , set_alpha_to_one=UpperCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
_snake_case : List[str] = AutoencoderKL()
_snake_case : int = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Tuple=0 , UpperCamelCase : Dict=True ):
'''simple docstring'''
if str(UpperCamelCase ).startswith('mps' ):
_snake_case : int = torch.manual_seed(UpperCamelCase )
else:
_snake_case : str = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
_snake_case : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
if pil_image:
_snake_case : List[str] = input_image * 0.5 + 0.5
_snake_case : Any = input_image.clamp(0 , 1 )
_snake_case : str = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_snake_case : Dict = DiffusionPipeline.numpy_to_pil(UpperCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : int = self.get_dummy_components()
_snake_case : str = StableUnCLIPImgaImgPipeline(**UpperCamelCase )
_snake_case : List[str] = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase )
inputs.update({'image_embeds': None} )
_snake_case : int = sd_pipe(**UpperCamelCase ).images
_snake_case : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case : Union[str, Any] = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : List[Any] = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCamelCase )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
_snake_case : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
_snake_case : int = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_snake_case : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
_snake_case : Optional[int] = pipe(UpperCamelCase , 'anime turle' , generator=UpperCamelCase , output_type='np' )
_snake_case : List[Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
_snake_case : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
_snake_case : Tuple = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_snake_case : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_snake_case : List[str] = pipe(UpperCamelCase , 'anime turle' , generator=UpperCamelCase , output_type='np' )
_snake_case : int = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_snake_case : str = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
_snake_case : Optional[int] = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_snake_case : Optional[int] = pipe(
UpperCamelCase , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
_snake_case : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 669 |
# Function to print upper half of diamond (pyramid)
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> List[str]:
for i in range(0 , lowerCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> List[Any]:
for i in range(lowerCAmelCase , 0 , -1 ):
for _ in range(lowerCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def lowerCamelCase_ ( lowerCAmelCase: Tuple )-> int:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCAmelCase ) # upper half
reverse_floyd(lowerCAmelCase ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
lowerCAmelCase_ = 1
while K:
lowerCAmelCase_ = int(input("""enter the number, and see the magic : """))
print()
pretty_print(user_number)
lowerCAmelCase_ = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 669 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def lowerCamelCase_ ( lowerCAmelCase: dict , lowerCAmelCase: str , lowerCAmelCase: set , lowerCAmelCase: set , lowerCAmelCase: dict , lowerCAmelCase: dict , lowerCAmelCase: PriorityQueue , lowerCAmelCase: dict , lowerCAmelCase: float | int , )-> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
_snake_case : Union[str, Any] = cst_fwd.get(lowerCAmelCase , np.inf )
_snake_case : Tuple = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
_snake_case : Dict = new_cost_f
_snake_case : List[str] = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
_snake_case : Optional[int] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: str , lowerCAmelCase: dict , lowerCAmelCase: dict )-> int:
_snake_case : Optional[Any] = -1
_snake_case : Any = set()
_snake_case : Union[str, Any] = set()
_snake_case : List[Any] = {source: 0}
_snake_case : Tuple = {destination: 0}
_snake_case : str = {source: None}
_snake_case : Optional[Any] = {destination: None}
_snake_case : PriorityQueue[Any] = PriorityQueue()
_snake_case : PriorityQueue[Any] = PriorityQueue()
_snake_case : List[Any] = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
_snake_case , _snake_case : Optional[int] = queue_forward.get()
visited_forward.add(lowerCAmelCase )
_snake_case , _snake_case : Dict = queue_backward.get()
visited_backward.add(lowerCAmelCase )
_snake_case : Any = pass_and_relaxation(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , )
_snake_case : Optional[Any] = pass_and_relaxation(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
_snake_case : Tuple = shortest_distance
return shortest_path_distance
lowerCAmelCase_ = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
lowerCAmelCase_ = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
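# Editor's sketch (hedged): the two functions above were `pass_and_relaxation` and
# `bidirectional_dij` before obfuscation, and the two dicts defined after them are
# the forward and backward adjacency lists (assumed bound as graph_fwd / graph_bwd).
# With those names:
#
#   bidirectional_dij("E", "F", graph_fwd, graph_bwd)  # -> 3, via E -> G -> F
#
# The loop stops once cst_fwd[v_fwd] + cst_bwd[v_bwd] can no longer beat the best
# meeting-point distance found so far.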
| 669 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
| 669 | 1 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase : int = 16 , UpperCamelCase : int = 88 , UpperCamelCase : Optional[int] = None , UpperCamelCase : int = 1 , UpperCamelCase : float = 0.0 , UpperCamelCase : int = 32 , UpperCamelCase : Optional[int] = None , UpperCamelCase : bool = False , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : str = "geglu" , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : Dict = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=UpperCamelCase , attention_head_dim=UpperCamelCase , in_channels=UpperCamelCase , num_layers=UpperCamelCase , dropout=UpperCamelCase , norm_num_groups=UpperCamelCase , cross_attention_dim=UpperCamelCase , attention_bias=UpperCamelCase , sample_size=UpperCamelCase , num_vector_embeds=UpperCamelCase , activation_fn=UpperCamelCase , num_embeds_ada_norm=UpperCamelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_snake_case : int = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_snake_case : Optional[int] = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_snake_case : Optional[Any] = [1, 0]
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : int=None , UpperCamelCase : str=None , UpperCamelCase : int=None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : str = hidden_states
_snake_case : Tuple = []
_snake_case : Optional[int] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_snake_case : List[str] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_snake_case : str = self.transformer_index_for_condition[i]
_snake_case : int = self.transformers[transformer_index](
UpperCamelCase , encoder_hidden_states=UpperCamelCase , timestep=UpperCamelCase , cross_attention_kwargs=UpperCamelCase , return_dict=UpperCamelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_snake_case : str = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_snake_case : Optional[Any] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=UpperCamelCase )
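# Editor's note: the forward pass above runs each sub-transformer on its assigned
# slice of the conditioning sequence (77 CLIP text tokens and 257 image tokens,
# routed via transformer_index_for_condition), then blends the two residuals with
# mix_ratio (0.5 by default) before re-adding the input states.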
| 669 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # until we find a neighbor that we can move to
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
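# Editor's note: the acceptance rule in the search above is the Metropolis
# criterion: a worsening move (change < 0 after the find_max sign flip) is still
# accepted with probability exp(change / current_temp), so exploration shrinks
# as the temperature decays by rate_of_decrease each iteration.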
| 669 | 1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # until we find a neighbor that we can move to
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 669 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
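# Editor's sketch (hedged; argument names follow the un-obfuscated diffusers
# signature, and the tensors are hypothetical placeholders):
#
#   prior = PriorTransformer()      # defaults above: 32 heads x 64 dims, 20 layers
#   out = prior(
#       hidden_states,              # (batch, embedding_dim) or (batch, tokens, embedding_dim)
#       timestep,                   # scalar or (batch,)
#       proj_embedding,             # (batch, embedding_proj_dim)
#       encoder_hidden_states,      # optional extra conditioning tokens
#   )
#   # `predicted_image_embedding` is the output field name before obfuscation.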
| 669 | 1 |
lowerCAmelCase_ = """Input must be a string of 8 numbers plus letter"""
lowerCAmelCase_ = """TRWAGMYFPDXBNJZSQVHLCKE"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> bool:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = F"""Expected string as input, found {type(lowerCAmelCase ).__name__}"""
raise TypeError(lowerCAmelCase )
_snake_case : List[Any] = spanish_id.replace('-' , '' ).upper()
if len(lowerCAmelCase ) != 9:
raise ValueError(lowerCAmelCase )
try:
_snake_case : Optional[int] = int(spanish_id_clean[0:8] )
_snake_case : Any = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowerCAmelCase ) from ex
if letter.isdigit():
raise ValueError(lowerCAmelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
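# Editor's worked example: the check letter is simply LOOKUP_LETTERS[number % 23].
assert "TRWAGMYFPDXBNJZSQVHLCKE"[12_345_678 % 23] == "Z"  # so "12345678Z" is valid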
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Union[str, Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCAmelCase )
if number < 1:
_snake_case : int = F"""Input value of [number={number}] must be > 0"""
raise ValueError(lowerCAmelCase )
_snake_case : int = 1
for i in range(1 , lowerCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
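# Editor's sketch: the loop above applies the Catalan recurrence
# C(i) = C(i-1) * (4*i - 2) // (i + 1), so inputs 1..6 map to 1, 1, 2, 5, 14, 42.
# A self-contained check (the helper name is hypothetical):
def _catalan_demo(n: int) -> int:
    current = 1
    for i in range(1, n):
        current = current * (4 * i - 2) // (i + 1)
    return current

assert [_catalan_demo(k) for k in range(1, 7)] == [1, 1, 2, 5, 14, 42]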
| 669 | 1 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] )-> str:
_snake_case : str = checkpoints.load_tax_checkpoint(lowerCAmelCase )
_snake_case : Optional[int] = flatten_dict(lowerCAmelCase )
return flax_params
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] )-> int:
_snake_case : Union[str, Any] = {}
_snake_case : Any = {
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
_snake_case : Any = {
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
_snake_case : str = '.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
_snake_case : Tuple = new_key.replace(lowerCAmelCase , lowerCAmelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
_snake_case : List[Any] = new_key.replace(lowerCAmelCase , lowerCAmelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
_snake_case : str = re.sub(R'layers_(\d+)' , R'layer.\1' , lowerCAmelCase )
_snake_case : Any = new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
_snake_case : List[str] = re.sub(R'layers_(\d+)' , R'layer.\1' , lowerCAmelCase )
_snake_case : str = flax_dict[key]
_snake_case : Tuple = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
_snake_case : str = torch.from_numpy(converted_dict[key].T )
else:
_snake_case : str = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Union[str, Any]=False , lowerCAmelCase: str=False )-> Any:
_snake_case : Tuple = get_flax_param(lowerCAmelCase )
if not use_large:
_snake_case : Optional[int] = PixaStructVisionConfig()
_snake_case : Dict = PixaStructTextConfig()
else:
_snake_case : Union[str, Any] = PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
_snake_case : Dict = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
_snake_case : Any = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=lowerCAmelCase )
_snake_case : int = PixaStructForConditionalGeneration(lowerCAmelCase )
_snake_case : str = rename_and_convert_flax_params(lowerCAmelCase )
model.load_state_dict(lowerCAmelCase )
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
_snake_case : str = PixaStructImageProcessor()
_snake_case : Optional[Any] = PixaStructProcessor(image_processor=lowerCAmelCase , tokenizer=lowerCAmelCase )
if use_large:
_snake_case : Tuple = 40_96
_snake_case : Optional[int] = True
# mkdir if needed
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
model.save_pretrained(lowerCAmelCase )
processor.save_pretrained(lowerCAmelCase )
print('Model saved in {}'.format(lowerCAmelCase ) )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
lowerCAmelCase_ = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[int] = do_lower_case
_snake_case : Dict = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**UpperCamelCase )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ):
'''simple docstring'''
_snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
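# Editor's sketch (hedged; needs network access to the Hugging Face Hub):
#
#   tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   enc = tok("who is eating the apple?")
#   # input_ids are wrapped as [CLS] ... [SEP] by the special-token method above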
| 669 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 669 |
from __future__ import annotations
from random import random
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int | None = None ):
'''simple docstring'''
_snake_case : str = value
_snake_case : List[Any] = random()
_snake_case : Node | None = None
_snake_case : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
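# A small demo sketch (added for illustration) of the treap operations above;
# not part of the original file, and not called anywhere by default.
def demo_treap() -> None:
    root: Node | None = None
    for value in (5, 3, 8, 3):
        root = insert(root, value)
    inorder(root)  # expected: 3,3,5,8,
    print()
    root = erase(root, 3)  # erase removes *all* nodes holding the value
    inorder(root)  # expected: 5,8,
    print()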
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('Unknown command')
    return root
def main() -> None:
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        '- value to erase all nodes with value. \'q\' to quit. ' )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('goodbye!')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 669 | 1 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 1_23)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            'a': 0.0_8_4_9_7,
            'b': 0.0_1_4_9_2,
            'c': 0.0_2_2_0_2,
            'd': 0.0_4_2_5_3,
            'e': 0.1_1_1_6_2,
            'f': 0.0_2_2_2_8,
            'g': 0.0_2_0_1_5,
            'h': 0.0_6_0_9_4,
            'i': 0.0_7_5_4_6,
            'j': 0.0_0_1_5_3,
            'k': 0.0_1_2_9_2,
            'l': 0.0_4_0_2_5,
            'm': 0.0_2_4_0_6,
            'n': 0.0_6_7_4_9,
            'o': 0.0_7_5_0_7,
            'p': 0.0_1_9_2_9,
            'q': 0.0_0_0_9_5,
            'r': 0.0_7_5_8_7,
            's': 0.0_6_3_2_7,
            't': 0.0_9_3_5_6,
            'u': 0.0_2_7_5_8,
            'v': 0.0_0_9_7_8,
            'w': 0.0_2_5_6_0,
            'x': 0.0_0_1_5_0,
            'y': 0.0_1_9_9_4,
            'z': 0.0_0_0_7_7,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ''
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key)
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
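# Small usage sketch (added for illustration; the ciphertext is "the quick
# brown fox" shifted by 10, and no particular output is asserted here):
def demo_decrypt() -> None:
    shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared('dro aesmu lbygx pyh')
    print(shift, round(chi_squared, 3), plaintext)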
| 669 |
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12) )
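# Worked example (added for illustration): for a 4-digit window "9989" the left
# fold above evaluates "9" -> "81" -> "648" -> "5832", i.e. 9 * 9 * 8 * 9; the
# outer max() keeps the largest such 13-digit product over all windows.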
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : Any , ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : str = 13
_snake_case : List[str] = 7
_snake_case : Dict = True
_snake_case : Any = True
_snake_case : Any = True
_snake_case : Optional[int] = 99
_snake_case : Union[str, Any] = 32
_snake_case : Dict = 2
_snake_case : List[str] = 4
_snake_case : Optional[Any] = 37
_snake_case : Union[str, Any] = 'gelu'
_snake_case : Tuple = 0.1
_snake_case : List[Any] = 0.1
_snake_case : str = 5_12
_snake_case : str = 16
_snake_case : int = 2
_snake_case : int = 0.02
_snake_case : Dict = 3
_snake_case : Optional[int] = 4
_snake_case : Tuple = None
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : int = None
if self.use_input_mask:
_snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : Dict = None
_snake_case : Any = None
_snake_case : Any = None
if self.use_labels:
_snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_snake_case : int = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
(
(
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) ,
) : Tuple = self.prepare_config_and_inputs()
_snake_case : List[Any] = True
_snake_case : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase_ ( self : int , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : int = TFEsmModel(config=UpperCamelCase )
_snake_case : int = {'input_ids': input_ids, 'attention_mask': input_mask}
_snake_case : List[Any] = model(UpperCamelCase )
_snake_case : Any = [input_ids, input_mask]
_snake_case : str = model(UpperCamelCase )
_snake_case : Any = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : str , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
_snake_case : List[Any] = TFEsmModel(config=UpperCamelCase )
_snake_case : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
_snake_case : List[Any] = model(UpperCamelCase )
_snake_case : Tuple = [input_ids, input_mask]
_snake_case : Union[str, Any] = model(UpperCamelCase , encoder_hidden_states=UpperCamelCase )
# Also check the case where encoder outputs are not passed
_snake_case : int = model(UpperCamelCase , attention_mask=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : Any = TFEsmForMaskedLM(config=UpperCamelCase )
_snake_case : Dict = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[str] = self.num_labels
_snake_case : Dict = TFEsmForTokenClassification(config=UpperCamelCase )
_snake_case : str = {'input_ids': input_ids, 'attention_mask': input_mask}
_snake_case : List[str] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = self.prepare_config_and_inputs()
(
(
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) ,
) : Union[str, Any] = config_and_inputs
_snake_case : str = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[int] =(
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
a_ : Any =(
{
"""feature-extraction""": TFEsmModel,
"""fill-mask""": TFEsmForMaskedLM,
"""text-classification""": TFEsmForSequenceClassification,
"""token-classification""": TFEsmForTokenClassification,
"""zero-shot""": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ : Any =False
a_ : Union[str, Any] =False
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = TFEsmModelTester(self )
        _snake_case : Union[str, Any] = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase )
@slow
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Tuple = TFEsmModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : int = model_class(UpperCamelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_snake_case : int = model.get_bias()
assert isinstance(UpperCamelCase , UpperCamelCase )
for k, v in name.items():
assert isinstance(UpperCamelCase , tf.Variable )
else:
_snake_case : Optional[Any] = model.get_output_embeddings()
assert x is None
_snake_case : List[str] = model.get_bias()
assert name is None
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : int = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
_snake_case : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_snake_case : Tuple = model(UpperCamelCase )[0]
_snake_case : Tuple = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , UpperCamelCase )
# compare the actual values for a slice.
_snake_case : str = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Any = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
_snake_case : Dict = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_snake_case : Any = model(UpperCamelCase )[0]
# compare the actual values for a slice.
_snake_case : Any = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
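# Illustrative note (added): the integration tests above assert a fixed
# (1, 6, 33) logit shape and hard-coded 3x3 slices. A standalone version of the
# masked-LM check would look roughly like:
#
#   model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
#   logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#   assert list(logits.shape) == [1, 6, 33]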
| 669 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class _lowerCAmelCase ( TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , True )
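# Self-contained sketch (added for illustration): the deduplication under test
# clusters files whose token-set Jaccard similarity exceeds a threshold (0.85
# above). A minimal version of that similarity check:
def _jaccard_sketch(a: str, b: str) -> float:
    # Ratio of shared tokens to all distinct tokens across both files.
    tokens_a, tokens_b = set(a.split()), set(b.split())
    union = tokens_a | tokens_b
    return len(tokens_a & tokens_b) / len(union) if union else 1.0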
| 669 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 669 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( ProcessorMixin ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Optional[int] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
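# Illustrative usage sketch (added; `processor` and the inputs are assumed, not
# part of the original file). The __call__ above routes text to the XLM-R
# tokenizer and images to the CLIP image processor, then merges `pixel_values`
# into the text encoding:
#
#   enc = processor(text=["a photo of a cat"], images=[pil_image],
#                   return_tensors="pt")
#   enc.keys()  # input_ids, attention_mask, pixel_values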
| 669 | 1 |
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def mock_request( *args , **kwargs ):
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
    monkeypatch.setattr(requests , 'request' , mock_request )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file) -> None:
    assert path.endswith('.jsonl' )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
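# Illustrative note (added): the assertions above pin down datasets' cache
# layout -- each URL downloads to <cache_dir>/<hash_url_to_filename(url)>, with
# a sibling "<hash>.json" metadata file recording {"url": ..., "etag": ...},
# and extracted archives land under a parallel "extracted" subdirectory keyed
# the same way.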
| 669 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
a_ : int =KandinskyVaaImgaImgPipeline
a_ : int =["""image_embeds""", """negative_image_embeds""", """image"""]
a_ : List[str] =[
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
a_ : Dict =[
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ : Optional[Any] =False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return 1_00
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Union[str, Any] = {
'in_channels': 4,
            # Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_snake_case : List[str] = UNetaDConditionModel(**UpperCamelCase )
return model
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Any = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.dummy_unet
_snake_case : Tuple = self.dummy_movq
_snake_case : int = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.0_00_85,
'beta_end': 0.0_12,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_snake_case : int = DDIMScheduler(**UpperCamelCase )
_snake_case : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict=0 ):
'''simple docstring'''
_snake_case : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
_snake_case : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase )
# create init_image
_snake_case : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
_snake_case : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_snake_case : Any = Image.fromarray(np.uinta(UpperCamelCase ) ).convert('RGB' ).resize((2_56, 2_56) )
if str(UpperCamelCase ).startswith('mps' ):
_snake_case : Union[str, Any] = torch.manual_seed(UpperCamelCase )
else:
_snake_case : Union[str, Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
_snake_case : Dict = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = 'cpu'
_snake_case : str = self.get_dummy_components()
_snake_case : Union[str, Any] = self.pipeline_class(**UpperCamelCase )
_snake_case : Tuple = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : List[str] = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
_snake_case : Any = output.images
_snake_case : int = pipe(
**self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0]
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Union[str, Any] = np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
_snake_case : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_snake_case : Union[str, Any] = 'A red cartoon frog, 4k'
_snake_case : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase )
_snake_case : Dict = KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
_snake_case : Any = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_snake_case , _snake_case : List[str] = pipe_prior(
UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_snake_case : List[Any] = pipeline(
image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='np' , )
_snake_case : int = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
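# Illustrative note (added): the slow test above exercises Kandinsky 2.2's
# two-stage image-to-image flow. Roughly:
#
#   image_emb, zero_emb = pipe_prior(prompt, ...).to_tuple()   # text -> CLIP image embeds
#   image = pipeline(image=init_image, image_embeds=image_emb, # denoise from init image
#                    negative_image_embeds=zero_emb, strength=0.2, ...).images[0]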
| 669 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( PretrainedConfig ):
'''simple docstring'''
a_ : int ="""roberta"""
def __init__( self : int , UpperCamelCase : Tuple=5_02_65 , UpperCamelCase : Any=7_68 , UpperCamelCase : List[Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : Dict=30_72 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : str=1 , UpperCamelCase : int=0 , UpperCamelCase : Any=2 , UpperCamelCase : int="absolute" , UpperCamelCase : int=True , UpperCamelCase : List[Any]=None , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Any = vocab_size
_snake_case : List[str] = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Dict = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Union[str, Any] = use_cache
_snake_case : str = classifier_dropout
class _lowerCAmelCase ( OnnxConfig ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
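# Illustrative note (added): for the default task the property above resolves to
#
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])
#
# which tells the ONNX exporter that the batch and sequence dimensions of both
# inputs are dynamic.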
| 669 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class _lowerCAmelCase ( PreTrainedTokenizerFast ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Dict =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Tuple =["""input_ids""", """attention_mask"""]
a_ : Dict =RobertaTokenizer
def __init__( self : List[str] , UpperCamelCase : List[Any]=None , UpperCamelCase : Any=None , UpperCamelCase : Tuple=None , UpperCamelCase : List[Any]="replace" , UpperCamelCase : Tuple="<s>" , UpperCamelCase : Any="</s>" , UpperCamelCase : Union[str, Any]="</s>" , UpperCamelCase : List[Any]="<s>" , UpperCamelCase : int="<unk>" , UpperCamelCase : Union[str, Any]="<pad>" , UpperCamelCase : Tuple="<mask>" , UpperCamelCase : Dict=False , UpperCamelCase : str=True , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , UpperCamelCase , tokenizer_file=UpperCamelCase , errors=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase , **UpperCamelCase , )
_snake_case : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCamelCase ) != add_prefix_space:
_snake_case : List[Any] = getattr(UpperCamelCase , pre_tok_state.pop('type' ) )
_snake_case : List[str] = add_prefix_space
_snake_case : Union[str, Any] = pre_tok_class(**UpperCamelCase )
_snake_case : str = add_prefix_space
_snake_case : str = 'post_processor'
_snake_case : str = getattr(self.backend_tokenizer , UpperCamelCase , UpperCamelCase )
if tokenizer_component_instance:
_snake_case : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_snake_case : Optional[int] = tuple(state['sep'] )
if "cls" in state:
_snake_case : Any = tuple(state['cls'] )
_snake_case : Optional[Any] = False
if state.get('add_prefix_space' , UpperCamelCase ) != add_prefix_space:
_snake_case : Union[str, Any] = add_prefix_space
_snake_case : Any = True
if state.get('trim_offsets' , UpperCamelCase ) != trim_offsets:
_snake_case : Union[str, Any] = trim_offsets
_snake_case : Optional[Any] = True
if changes_to_apply:
_snake_case : int = getattr(UpperCamelCase , state.pop('type' ) )
_snake_case : Any = component_class(**UpperCamelCase )
setattr(self.backend_tokenizer , UpperCamelCase , UpperCamelCase )
@property
    def mask_token( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self , value ):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
def UpperCamelCase_ ( self : List[str] , *UpperCamelCase : List[Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = kwargs.get('is_split_into_words' , UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : List[str] , *UpperCamelCase : List[Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = kwargs.get('is_split_into_words' , UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Any = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
def UpperCamelCase_ ( self : Any , UpperCamelCase : str , UpperCamelCase : Optional[Any]=None ):
'''simple docstring'''
_snake_case : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Any = [self.sep_token_id]
_snake_case : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
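# Illustrative note (added): build_inputs_with_special_tokens above produces
# the standard RoBERTa layouts
#
#   single sequence: <s> A </s>
#   sequence pair:   <s> A </s> </s> B </s>
#
# and create_token_type_ids_from_sequences returns all zeros in both cases,
# since RoBERTa does not use token type ids.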
| 669 |
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)
def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            distance = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], distance)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway
def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
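# A small demo sketch (added for illustration) of a full run; not called by
# default. The parameter values below are arbitrary examples.
def demo_simulation() -> None:
    highway = construct_highway(number_of_cells=20, frequency=4, initial_speed=1)
    history = simulate(highway, number_of_update=5, probability=0.1, max_speed=5)
    for row in history:  # row[cell] is -1 for an empty cell, else that car's speed
        print(row)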
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( BaseOutput ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
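# Illustrative sketch (not part of the original module): the causal mask built
# in __init__ via torch.full(...).triu_(1) is -10000.0 strictly above the
# diagonal and 0.0 elsewhere, so adding it to the attention scores stops each
# position from attending to later positions. For a 3-token sequence:
# torch.full([3, 3], -1_00_00.0).triu_(1)
# -> [[0., -10000., -10000.],
#     [0.,      0., -10000.],
#     [0.,      0.,      0.]]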
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
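# Usage sketch (illustrative; in upstream transformers this class is
# RealmTokenizerFast and the candidate-encoding method above is
# batch_encode_candidates). Each batch entry carries several candidate texts,
# every candidate is padded to max_length, and the outputs stack into
# [batch_size, num_candidates, seq_len] tensors:
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# batch = tokenizer.batch_encode_candidates(
#     [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]],
#     max_length=10,
#     return_tensors="pt",
# )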
| 669 | 1 |
def lowerCamelCase_ ( lowerCAmelCase: float )-> float:
return 10 - x * x
def lowerCamelCase_ ( lowerCAmelCase: float , lowerCAmelCase: float )-> float:
# Bolzano's theorem (intermediate value theorem): a sign change between a and b guarantees a root in between
if equation(lowerCAmelCase ) * equation(lowerCAmelCase ) >= 0:
raise ValueError('Wrong space!' )
_snake_case : Optional[int] = a
while (b - a) >= 0.0_1:
# Find middle point
_snake_case : Any = (a + b) / 2
# Check if middle point is root
if equation(lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(lowerCAmelCase ) * equation(lowerCAmelCase ) < 0:
_snake_case : Optional[Any] = c
else:
_snake_case : Optional[Any] = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
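# Expected behaviour (illustrative): equation(x) = 10 - x * x changes sign on
# both intervals above and its positive root is sqrt(10) ~= 3.162, so each
# call is expected to print a midpoint close to 3.162, within the loop's
# 0.01-wide interval tolerance.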
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
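# Example invocation (illustrative; the names and path are placeholders, and
# valid tokenizer_name values are the SLOW_TO_FAST_CONVERTERS keys, e.g.
# "BertTokenizer"):
# python convert_slow_tokenizers_checkpoints_to_fast.py \
#     --tokenizer_name BertTokenizer \
#     --checkpoint_name bert-base-uncased \
#     --dump_path ./converted_fast_tokenizers/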
| 669 | 1 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowerCAmelCase_ = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
lowerCAmelCase_ = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
lowerCAmelCase_ = """
Calculates how good predictions are given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidate should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout: number of seconds after which a candidate program is stopped (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
lowerCAmelCase_ = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
lowerCAmelCase_ = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : str=[1, 10, 1_00] , UpperCamelCase : Optional[int]=4 , UpperCamelCase : str=3.0 ):
'''simple docstring'''
if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.' )
with ThreadPoolExecutor(max_workers=UpperCamelCase ) as executor:
_snake_case : Tuple = []
_snake_case : Optional[Any] = Counter()
_snake_case : Any = 0
_snake_case : int = defaultdict(UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(UpperCamelCase , UpperCamelCase ) ):
for candidate in candidates:
_snake_case : Optional[Any] = candidate + '\n' + test_case
_snake_case : Tuple = (test_program, timeout, task_id, completion_id[task_id])
_snake_case : Any = executor.submit(UpperCamelCase , *UpperCamelCase )
futures.append(UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(UpperCamelCase ):
_snake_case : str = future.result()
results[result["task_id"]].append((result['completion_id'], result) )
_snake_case , _snake_case : List[Any] = [], []
for result in results.values():
result.sort()
_snake_case : Union[str, Any] = [r[1]['passed'] for r in result]
total.append(len(UpperCamelCase ) )
correct.append(sum(UpperCamelCase ) )
_snake_case : Dict = np.array(UpperCamelCase )
_snake_case : List[str] = np.array(UpperCamelCase )
_snake_case : Optional[Any] = k
_snake_case : Optional[Any] = {f"""pass@{k}""": estimate_pass_at_k(UpperCamelCase , UpperCamelCase , UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> int:
def estimator(lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Dict = itertools.repeat(lowerCAmelCase , len(lowerCAmelCase ) )
else:
assert len(lowerCAmelCase ) == len(lowerCAmelCase )
_snake_case : Union[str, Any] = iter(lowerCAmelCase )
return np.array([estimator(int(lowerCAmelCase ) , int(lowerCAmelCase ) , lowerCAmelCase ) for n, c in zip(lowerCAmelCase , lowerCAmelCase )] )
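# Worked example (illustrative, not part of the original metric): with n=5
# completions per task of which c=2 pass, pass@1 reduces to c/n:
#     1 - prod(1 - 1/arange(4, 6)) = 1 - (3/4) * (4/5) = 0.4
# and pass@k is 1.0 whenever fewer than k samples fail (n - c < k).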
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return "".join([hex(lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(lowerCAmelCase )] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCAmelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
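# Round-trip sketch (illustrative; assuming the upstream function names
# base16_encode and base16_decode for the two definitions above):
# base16_encode(b"HELLO")      -> "48454C4C4F"
# base16_decode("48454C4C4F")  -> b"HELLO"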
| 669 | 1 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
lowerCAmelCase_ = """__DUMMY_TRANSFORMERS_USER__"""
lowerCAmelCase_ = """Dummy User"""
lowerCAmelCase_ = """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"""
lowerCAmelCase_ = """https://hub-ci.huggingface.co"""
lowerCAmelCase_ = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
lowerCAmelCase_ = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
lowerCAmelCase_ = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] )-> Union[str, Any]:
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , lowerCAmelCase )
@pytest.fixture
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> Any:
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , lowerCAmelCase )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , lowerCAmelCase )
@pytest.fixture
def lowerCamelCase_ ( lowerCAmelCase: List[Any] )-> List[Any]:
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , lowerCAmelCase )
@pytest.fixture
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: str )-> List[Any]:
HfFolder.save_token(lowerCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def lowerCamelCase_ ( )-> Tuple:
return HfApi(endpoint=lowerCAmelCase )
@pytest.fixture(scope='session' )
def lowerCamelCase_ ( lowerCAmelCase: HfApi )-> List[str]:
_snake_case : List[Any] = HfFolder.get_token()
HfFolder.save_token(lowerCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(lowerCAmelCase )
@pytest.fixture
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
def _cleanup_repo(lowerCAmelCase: Tuple ):
hf_api.delete_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
@contextmanager
def _temporary_repo(lowerCAmelCase: Dict ):
try:
yield repo_id
finally:
cleanup_repo(lowerCAmelCase )
return _temporary_repo
@pytest.fixture(scope='session' )
def lowerCamelCase_ ( lowerCAmelCase: HfApi , lowerCAmelCase: Any , lowerCAmelCase: List[str] )-> Optional[int]:
_snake_case : Dict = F"""repo_txt_data-{int(time.time() * 10E3 )}"""
_snake_case : Optional[int] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type='dataset' , private=lowerCAmelCase )
hf_api.upload_file(
token=lowerCAmelCase , path_or_fileobj=str(lowerCAmelCase ) , path_in_repo='data/text_data.txt' , repo_id=lowerCAmelCase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: List[str] , lowerCAmelCase: List[str] )-> str:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def lowerCamelCase_ ( lowerCAmelCase: HfApi , lowerCAmelCase: List[Any] , lowerCAmelCase: Any )-> List[str]:
_snake_case : Dict = F"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
_snake_case : Any = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type='dataset' , private=lowerCAmelCase )
hf_api.upload_file(
token=lowerCAmelCase , path_or_fileobj=str(lowerCAmelCase ) , path_in_repo='data.zip' , repo_id=lowerCAmelCase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> List[Any]:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def lowerCamelCase_ ( lowerCAmelCase: HfApi , lowerCAmelCase: str , lowerCAmelCase: Optional[int] )-> int:
_snake_case : Any = F"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
_snake_case : Dict = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type='dataset' , private=lowerCAmelCase )
hf_api.upload_file(
token=lowerCAmelCase , path_or_fileobj=str(lowerCAmelCase ) , path_in_repo='data.zip' , repo_id=lowerCAmelCase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(lowerCAmelCase , token=lowerCAmelCase , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: List[str] , lowerCAmelCase: Optional[int] )-> Dict:
return hf_private_dataset_repo_zipped_img_data_
| 669 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 1 |
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = {}
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(UpperCamelCase , ' -> ' , ' -> '.join([str(UpperCamelCase ) for j in self.vertex[i]] ) )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCamelCase )
else:
# else make a new vertex
_snake_case : str = [to_vertex]
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Any = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : list ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
print(UpperCamelCase , end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 669 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(UpperCamelCase ) for k, v in self.__dict__.items()} )
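# Usage sketch (illustrative; upstream this dataclass corresponds to datasets'
# DownloadConfig): the method above rebuilds the config from a field-by-field
# deepcopy, so mutating the copy leaves the original untouched.
# config = DownloadConfig(max_retries=3)
# clone = config.copy()
# clone.max_retries = 5   # config.max_retries is still 3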
| 669 | 1 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase : Collection[float] | None = None ):
'''simple docstring'''
if components is None:
_snake_case : Optional[int] = []
_snake_case : str = list(UpperCamelCase )
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.__components )
def __str__( self : Optional[int] ):
'''simple docstring'''
return "(" + ",".join(map(UpperCamelCase , self.__components ) ) + ")"
def __add__( self : List[Any] , UpperCamelCase : Vector ):
'''simple docstring'''
_snake_case : Dict = len(self )
if size == len(UpperCamelCase ):
_snake_case : str = [self.__components[i] + other.component(UpperCamelCase ) for i in range(UpperCamelCase )]
return Vector(UpperCamelCase )
else:
raise Exception('must have the same size' )
def __sub__( self : List[str] , UpperCamelCase : Vector ):
'''simple docstring'''
_snake_case : Optional[int] = len(self )
if size == len(UpperCamelCase ):
_snake_case : List[str] = [self.__components[i] - other.component(UpperCamelCase ) for i in range(UpperCamelCase )]
return Vector(UpperCamelCase )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Optional[int] , UpperCamelCase : float ):
'''simple docstring'''
...
@overload
def __mul__( self : Dict , UpperCamelCase : Vector ):
'''simple docstring'''
...
def __mul__( self : Union[str, Any] , UpperCamelCase : float | Vector ):
'''simple docstring'''
if isinstance(UpperCamelCase , (float, int) ):
_snake_case : List[Any] = [c * other for c in self.__components]
return Vector(UpperCamelCase )
elif isinstance(UpperCamelCase , UpperCamelCase ) and len(self ) == len(UpperCamelCase ):
_snake_case : List[Any] = len(self )
_snake_case : Tuple = [self.__components[i] * other.component(UpperCamelCase ) for i in range(UpperCamelCase )]
return sum(UpperCamelCase )
else: # error case
raise Exception('invalid operand!' )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return Vector(self.__components )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : int ):
'''simple docstring'''
if isinstance(UpperCamelCase , UpperCamelCase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : float ):
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
_snake_case : str = value
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
_snake_case : Tuple = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCamelCase ) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Vector , UpperCamelCase : bool = False ):
'''simple docstring'''
_snake_case : List[Any] = self * other
_snake_case : List[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def lowerCamelCase_ ( lowerCAmelCase: int )-> Vector:
assert isinstance(lowerCAmelCase , lowerCAmelCase )
return Vector([0] * dimension )
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int )-> Vector:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (isinstance(lowerCAmelCase , lowerCAmelCase ))
_snake_case : Any = [0] * dimension
_snake_case : str = 1
return Vector(lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: float , lowerCAmelCase: Vector , lowerCAmelCase: Vector )-> Vector:
assert (
isinstance(lowerCAmelCase , lowerCAmelCase )
and isinstance(lowerCAmelCase , lowerCAmelCase )
and (isinstance(lowerCAmelCase , (int, float) ))
)
return x * scalar + y
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int )-> Vector:
random.seed(lowerCAmelCase )
_snake_case : Any = [random.randint(lowerCAmelCase , lowerCAmelCase ) for _ in range(lowerCAmelCase )]
return Vector(lowerCAmelCase )
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : str , UpperCamelCase : list[list[float]] , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
_snake_case : Any = matrix
_snake_case : Union[str, Any] = w
_snake_case : Tuple = h
def __str__( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Union[str, Any] , UpperCamelCase : Matrix ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
_snake_case : List[Any] = []
for i in range(self.__height ):
_snake_case : int = [
self.__matrix[i][j] + other.component(UpperCamelCase , UpperCamelCase )
for j in range(self.__width )
]
matrix.append(UpperCamelCase )
return Matrix(UpperCamelCase , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self : List[str] , UpperCamelCase : Matrix ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
_snake_case : Any = []
for i in range(self.__height ):
_snake_case : List[str] = [
self.__matrix[i][j] - other.component(UpperCamelCase , UpperCamelCase )
for j in range(self.__width )
]
matrix.append(UpperCamelCase )
return Matrix(UpperCamelCase , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : str , UpperCamelCase : float ):
'''simple docstring'''
...
@overload
def __mul__( self : str , UpperCamelCase : Vector ):
'''simple docstring'''
...
def __mul__( self : Union[str, Any] , UpperCamelCase : float | Vector ):
'''simple docstring'''
if isinstance(UpperCamelCase , UpperCamelCase ): # matrix-vector
if len(UpperCamelCase ) == self.__width:
_snake_case : Union[str, Any] = zero_vector(self.__height )
for i in range(self.__height ):
_snake_case : Tuple = [
self.__matrix[i][j] * other.component(UpperCamelCase )
for j in range(self.__width )
]
ans.change_component(UpperCamelCase , sum(UpperCamelCase ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(UpperCamelCase , (int, float) ): # matrix-scalar
_snake_case : List[str] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCamelCase , self.__width , self.__height )
return None
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
return self.__height
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return self.__width
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : float ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
_snake_case : Union[str, Any] = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception('Matrix is not square' )
_snake_case : Union[str, Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCamelCase ) ):
_snake_case : Optional[int] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCamelCase , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase_ ( self : Dict , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCamelCase , UpperCamelCase )
else:
raise Exception('Indices out of bounds' )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
_snake_case : str = [
self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase ) for y in range(self.__width )
]
return sum(UpperCamelCase )
def lowerCamelCase_ ( lowerCAmelCase: int )-> Matrix:
_snake_case : list[list[float]] = [[0] * n for _ in range(lowerCAmelCase )]
return Matrix(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int )-> Matrix:
random.seed(lowerCAmelCase )
_snake_case : list[list[float]] = [
[random.randint(lowerCAmelCase , lowerCAmelCase ) for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase )
]
return Matrix(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
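# Usage sketch (illustrative; assuming the upstream names Vector and Matrix
# from the linear_algebra module these classes mirror):
# v = Vector([1, 2, 3]); w = Vector([4, 5, 6])
# v + w                    -> (5,7,9)
# v * w                    -> 32 (dot product)
# v.euclidean_length()     -> sqrt(14) ~= 3.7417
# Matrix([[1, 2], [3, 4]], 2, 2).determinant()  -> 1*4 - 2*3 = -2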
| 669 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = tokenizer
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase )
_snake_case : int = TFGPTaLMHeadModel.from_config(UpperCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer(UpperCamelCase )
_snake_case : Union[str, Any] = tokenized['input_ids'].to_tensor()
_snake_case : Any = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_snake_case : Tuple = self.model(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )['logits']
return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
_snake_case : Optional[int] = [GPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_snake_case : Tuple = [TFGPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_snake_case : Any = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_snake_case : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_snake_case : Optional[int] = tokenizer([test_inputs] , return_tensors='tf' )
_snake_case : Tuple = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_snake_case : Dict = python_outputs[key].numpy()
_snake_case : Optional[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : str = tf.function(UpperCamelCase )
for test_inputs in self.test_sentences:
_snake_case : int = tf.constant(UpperCamelCase )
_snake_case : Tuple = compiled_tokenizer(UpperCamelCase )
_snake_case : int = tf_tokenizer(UpperCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Union[str, Any] = ModelToSave(tokenizer=UpperCamelCase )
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Tuple = model.serving(UpperCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_snake_case : str = Path(UpperCamelCase ) / 'saved.model'
tf.saved_model.save(UpperCamelCase , UpperCamelCase , signatures={'serving_default': model.serving} )
_snake_case : Optional[int] = tf.saved_model.load(UpperCamelCase )
_snake_case : List[str] = loaded_model.signatures['serving_default'](UpperCamelCase )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Any = tf_tokenizer(UpperCamelCase ) # Build model with some sample inputs
_snake_case : Optional[Any] = tf_tokenizer.get_config()
_snake_case : Tuple = TFGPTaTokenizer.from_config(UpperCamelCase )
_snake_case : Optional[Any] = model_from_config(UpperCamelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_snake_case : Union[str, Any] = 12_31_23
for max_length in [3, 5, 10_24]:
_snake_case : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : List[str] = tf_tokenizer(UpperCamelCase , max_length=UpperCamelCase )
_snake_case : int = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 669 | 1 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
lowerCAmelCase_ = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
lowerCAmelCase_ = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print("""\n""".join(upper_files) + """\n""")
lowerCAmelCase_ = [file for file in filepaths if """ """ in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print("""\n""".join(space_files) + """\n""")
lowerCAmelCase_ = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print("""\n""".join(hyphen_files) + """\n""")
lowerCAmelCase_ = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print("""\n""".join(nodir_files) + """\n""")
lowerCAmelCase_ = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> list:
_snake_case : List[Any] = int(lowerCAmelCase )
if n_element < 1:
_snake_case : int = ValueError('n_element should be a positive number' )
raise my_error
_snake_case : Union[str, Any] = [1]
_snake_case , _snake_case , _snake_case : Any = (0, 0, 0)
_snake_case : str = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase_ = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase_ = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[Any] =CodeGenTokenizer
a_ : Optional[int] =CodeGenTokenizerFast
a_ : Optional[int] =True
a_ : Union[str, Any] ={"""add_prefix_space""": True}
a_ : int =False
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
_snake_case : Any = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
_snake_case : Dict = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case : Optional[int] = {'unk_token': '<unk>'}
_snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase ) )
def UpperCamelCase_ ( self : Union[str, Any] , **UpperCamelCase : str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : int , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : Optional[Any] = 'lower newer'
_snake_case : int = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Any = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_snake_case : Any = 'lower newer'
_snake_case : str = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
_snake_case : List[Any] = tokenizer.tokenize(UpperCamelCase , add_prefix_space=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
_snake_case : Any = tokens + [tokenizer.unk_token]
_snake_case : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_snake_case : Any = self.get_tokenizer()
_snake_case : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase )
_snake_case : Optional[int] = 'lower newer'
# Testing tokenization
_snake_case : Union[str, Any] = tokenizer.tokenize(UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# Testing conversion to ids without special tokens
_snake_case : Optional[Any] = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : str = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# Testing conversion to ids with special tokens
_snake_case : Tuple = self.get_rust_tokenizer(add_prefix_space=UpperCamelCase )
_snake_case : str = tokenizer.encode(UpperCamelCase , add_prefix_space=UpperCamelCase )
_snake_case : Union[str, Any] = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# Testing the unknown token
_snake_case : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
_snake_case : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Dict=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase )
# Simple input
_snake_case : Union[str, Any] = 'This is a simple input'
_snake_case : Dict = ['This is a simple input 1', 'This is a simple input 2']
_snake_case : Optional[Any] = ('This is a simple input', 'This is a pair')
_snake_case : int = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Simple input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Simple input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' , )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Pair input
self.assertRaises(UpperCamelCase , tokenizer_r.encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' )
# Pair input
self.assertRaises(
UpperCamelCase , tokenizer_r.batch_encode_plus , UpperCamelCase , max_length=UpperCamelCase , padding='max_length' , )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
_snake_case : Optional[int] = 'This is a simple input'
_snake_case : Optional[Any] = ['This is a simple input looooooooong', 'This is a simple input']
_snake_case : int = ('This is a simple input', 'This is a pair')
_snake_case : List[str] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
_snake_case : Union[str, Any] = tokenizer.pad_token_id
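# `padding='max_length'` pads every sequence to the fixed length requested,
# while a plain truthy `padding` argument pads only to the longest sequence in
# the batch — the shape and mask asserts below check both behaviours.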
_snake_case : Any = tokenizer(UpperCamelCase , padding='max_length' , max_length=30 , return_tensors='np' )
_snake_case : Dict = tokenizer(UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors='np' )
_snake_case : Optional[Any] = tokenizer(*UpperCamelCase , padding='max_length' , max_length=60 , return_tensors='np' )
_snake_case : Optional[Any] = tokenizer(UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : str = '$$$'
_snake_case : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=UpperCamelCase , add_bos_token=UpperCamelCase )
_snake_case : Tuple = 'This is a simple input'
_snake_case : Tuple = ['This is a simple input 1', 'This is a simple input 2']
_snake_case : List[Any] = tokenizer.bos_token_id
_snake_case : int = tokenizer(UpperCamelCase )
_snake_case : Tuple = tokenizer(UpperCamelCase )
self.assertEqual(out_s.input_ids[0] , UpperCamelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_snake_case : Union[str, Any] = tokenizer.decode(out_s.input_ids )
_snake_case : Any = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , UpperCamelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Dict = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
_snake_case : Any = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
_snake_case : Optional[Any] = '\nif len_a > len_b: result = a\nelse: result = b'
_snake_case : Optional[int] = tokenizer.encode(UpperCamelCase )
_snake_case : Optional[Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
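# `truncate_before_pattern` cuts the decoded text at the first match of any of
# these regexes (comment markers, the EOS token, triple quotes, or a blank-line
# run), which strips trailing output after a completed code snippet.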
_snake_case : Optional[Any] = tokenizer.decode(UpperCamelCase , truncate_before_pattern=UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
| 669 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple="shi-labs/oneformer_demo" )-> Any:
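# Builds the metadata mapping OneFormer expects from a class-info JSON hosted
# on the Hub: an id -> class-name table plus the "thing_ids" (instance classes)
# and "class_names" lists.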
with open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
_snake_case : str = json.load(lowerCAmelCase )
_snake_case : List[str] = {}
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
for key, info in class_info.items():
_snake_case : Optional[int] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase ) )
_snake_case : List[str] = thing_ids
_snake_case : Optional[Any] = class_names
return metadata
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
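# Aspect-ratio-preserving resize: the shorter image side is scaled to
# `shortest_edge` and the longer side is scaled by the same factor.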
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
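# binary_mask_to_rle flattens the mask and encodes it as alternating run
# lengths, starting with the count of leading zeros; e.g. an illustrative flat
# mask [0, 0, 1, 1, 1, 0] would encode to [2, 3, 1].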
_snake_case : Union[str, Any] = np.zeros((20, 50) )
_snake_case : int = 1
_snake_case : int = 1
_snake_case : Optional[Any] = 1
_snake_case : List[Any] = binary_mask_to_rle(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_semantic_segmentation(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_snake_case : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
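# Passing `target_sizes` makes post-processing resize each predicted
# segmentation map to the requested (height, width) instead of the model size.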
_snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(UpperCamelCase , target_sizes=UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 669 | 1 |
from __future__ import annotations
def lowerCamelCase_ ( lowerCAmelCase: list[int] , lowerCAmelCase: int )-> list[list[int]]:
_snake_case : list[list[int]] = []
_snake_case : list[int] = []
_snake_case : int = 0
_snake_case : Tuple = sum(lowerCAmelCase )
create_state_space_tree(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return result
def lowerCamelCase_ ( lowerCAmelCase: list[int] , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: list[int] , lowerCAmelCase: list[list[int]] , lowerCAmelCase: int , )-> None:
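# Prune the branch early: stop if the path already overshoots max_sum, or if
# even adding every remaining number could no longer reach it.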
if sum(lowerCAmelCase ) > max_sum or (remaining_nums_sum + sum(lowerCAmelCase )) < max_sum:
return
if sum(lowerCAmelCase ) == max_sum:
result.append(lowerCAmelCase )
return
for index in range(lowerCAmelCase , len(lowerCAmelCase ) ):
create_state_space_tree(
lowerCAmelCase , lowerCAmelCase , index + 1 , [*path, nums[index]] , lowerCAmelCase , remaining_nums_sum - nums[index] , )
lowerCAmelCase_ = [3, 34, 4, 12, 5, 2]
lowerCAmelCase_ = 9
lowerCAmelCase_ = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 669 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase_ ( )-> Tuple:
_snake_case : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : int = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
def lowerCamelCase_ ( lowerCAmelCase: str=None )-> Any:
if subparsers is not None:
_snake_case : List[Any] = subparsers.add_parser('config' , description=lowerCAmelCase )
else:
_snake_case : Dict = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Any:
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[str] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
_snake_case : Union[str, Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase_ ( )-> Dict:
_snake_case : List[str] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
| 669 | 1 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowerCAmelCase_ = input("""Enter image url: """).strip()
print(F"""Downloading image from {url} ...""")
lowerCAmelCase_ = BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
lowerCAmelCase_ = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
lowerCAmelCase_ = requests.get(image_url).content
lowerCAmelCase_ = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 669 |
# Function to print upper half of diamond (pyramid)
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> List[str]:
for i in range(0 , lowerCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> List[Any]:
for i in range(lowerCAmelCase , 0 , -1 ):
for _ in range(lowerCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def lowerCamelCase_ ( lowerCAmelCase: Tuple )-> int:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCAmelCase ) # upper half
reverse_floyd(lowerCAmelCase ) # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
lowerCAmelCase_ = 1
while K:
lowerCAmelCase_ = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
lowerCAmelCase_ = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 669 | 1 |
import collections
import os
import re
from pathlib import Path
lowerCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase_ = re.compile(r"""^\s*else:""")
def lowerCamelCase_ ( lowerCAmelCase: List[Any] )-> str:
if _re_test_backend.search(lowerCAmelCase ) is None:
return None
_snake_case : Tuple = [b[0] for b in _re_backend.findall(lowerCAmelCase )]
backends.sort()
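# A guard such as `if not is_torch_available() and is_flax_available():` maps
# to the canonical backend key "flax_and_torch" (names sorted, "_and_"-joined).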
return "_and_".join(lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Optional[Any]:
with open(lowerCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
_snake_case : int = f.readlines()
_snake_case : List[str] = 0
while line_index < len(lowerCAmelCase ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
_snake_case : Union[str, Any] = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
_snake_case : Union[str, Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCAmelCase ):
_snake_case : int = _re_one_line_import_struct.search(lowerCAmelCase ).groups()[0]
_snake_case : Any = re.findall(R'\[([^\]]+)\]' , lowerCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
_snake_case : List[Any] = _re_import_struct_key_value.search(lowerCAmelCase )
if single_line_import_search is not None:
_snake_case : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(lowerCAmelCase ) > 0]
objects.extend(lowerCAmelCase )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
_snake_case : Optional[int] = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_snake_case : int = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_snake_case : Optional[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_snake_case : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
_snake_case : Optional[int] = lines[line_index]
if _re_import_struct_add_one.search(lowerCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCAmelCase ) is not None:
_snake_case : Optional[int] = _re_import_struct_add_many.search(lowerCAmelCase ).groups()[0].split(', ' )
_snake_case : Optional[int] = [obj[1:-1] for obj in imports if len(lowerCAmelCase ) > 0]
objects.extend(lowerCAmelCase )
elif _re_between_brackets.search(lowerCAmelCase ) is not None:
_snake_case : str = _re_between_brackets.search(lowerCAmelCase ).groups()[0].split(', ' )
_snake_case : int = [obj[1:-1] for obj in imports if len(lowerCAmelCase ) > 0]
objects.extend(lowerCAmelCase )
elif _re_quote_object.search(lowerCAmelCase ) is not None:
objects.append(_re_quote_object.search(lowerCAmelCase ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
_snake_case : Optional[int] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_snake_case : str = []
while (
line_index < len(lowerCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
_snake_case : str = lines[line_index]
_snake_case : List[Any] = _re_import.search(lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_snake_case : Dict = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
_snake_case : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_snake_case : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_snake_case : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
_snake_case : List[str] = lines[line_index]
_snake_case : str = _re_import.search(lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_snake_case : str = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Optional[int]:
def find_duplicates(lowerCAmelCase: Any ):
return [k for k, v in collections.Counter(lowerCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_snake_case : Optional[int] = []
for key in import_dict_objects.keys():
_snake_case : str = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
_snake_case : Dict = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_snake_case : Tuple = 'base imports' if key == 'none' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def lowerCamelCase_ ( )-> Optional[Any]:
_snake_case : Optional[int] = []
for root, _, files in os.walk(lowerCAmelCase ):
if "__init__.py" in files:
_snake_case : Any = os.path.join(lowerCAmelCase , '__init__.py' )
_snake_case : Dict = parse_init(lowerCAmelCase )
if objects is not None:
_snake_case : Optional[Any] = analyze_results(*lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
_snake_case : List[str] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('\n'.join(lowerCAmelCase ) )
if len(lowerCAmelCase ) > 0:
raise ValueError('\n\n'.join(lowerCAmelCase ) )
def lowerCamelCase_ ( )-> Dict:
_snake_case : Union[str, Any] = []
for path, directories, files in os.walk(lowerCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(lowerCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCAmelCase ) / folder).glob('*.py' ) ) ) == 0:
continue
_snake_case : Union[str, Any] = str((Path(lowerCAmelCase ) / folder).relative_to(lowerCAmelCase ) )
_snake_case : Dict = short_path.replace(os.path.sep , '.' )
submodules.append(lowerCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
_snake_case : Any = str((Path(lowerCAmelCase ) / fname).relative_to(lowerCAmelCase ) )
_snake_case : Optional[Any] = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(lowerCAmelCase )
return submodules
lowerCAmelCase_ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def lowerCamelCase_ ( )-> Optional[int]:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
_snake_case : Dict = direct_transformers_import(lowerCAmelCase )
_snake_case : Any = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentially re-) add them.
with open(os.path.join(lowerCAmelCase , '__init__.py' ) , 'r' ) as f:
_snake_case : Union[str, Any] = f.read()
import_structure_keys.update(set(re.findall(R'import_structure\[\"([^\"]*)\"\]' , lowerCAmelCase ) ) )
_snake_case : List[str] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(lowerCAmelCase ) > 0:
_snake_case : str = '\n'.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F"""{list_of_modules}\n"""
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 669 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
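# The spectrogram is split into patch_size x patch_size patches taken every
# (frequency_stride, time_stride) bins, so strides smaller than the patch size
# yield overlapping patches and a longer token sequence.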
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
| 669 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar("""T""")
lowerCAmelCase_ = TypeVar("""U""")
class _lowerCAmelCase ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : T | None , UpperCamelCase : U | None ):
'''simple docstring'''
_snake_case : Optional[Any] = key
_snake_case : List[Any] = val
_snake_case : DoubleLinkedListNode[T, U] | None = None
_snake_case : DoubleLinkedListNode[T, U] | None = None
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class _lowerCAmelCase ( Generic[T, U] ):
'''simple docstring'''
def __init__( self : str ):
'''simple docstring'''
_snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(UpperCamelCase , UpperCamelCase )
_snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(UpperCamelCase , UpperCamelCase )
_snake_case , _snake_case : List[str] = self.rear, self.head
def __repr__( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = ['DoubleLinkedList']
_snake_case : Dict = self.head
while node.next is not None:
rep.append(str(UpperCamelCase ) )
_snake_case : Dict = node.next
rep.append(str(self.rear ) )
return ",\n ".join(UpperCamelCase )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
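# Insert the new node just before the rear sentinel, i.e. into the
# most-recently-used slot of the list.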
_snake_case : str = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_snake_case : Tuple = node
_snake_case : int = previous
_snake_case : int = node
_snake_case : int = self.rear
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : DoubleLinkedListNode[T, U] ):
'''simple docstring'''
if node.prev is None or node.next is None:
return None
_snake_case : str = node.next
_snake_case : List[str] = node.prev
_snake_case : Union[str, Any] = None
_snake_case : Optional[int] = None
return node
class _lowerCAmelCase ( Generic[T, U] ):
'''simple docstring'''
a_ : dict[Callable[[T], U], LRUCache[T, U]] ={}
def __init__( self : List[Any] , UpperCamelCase : int ):
'''simple docstring'''
_snake_case : DoubleLinkedList[T, U] = DoubleLinkedList()
_snake_case : Dict = capacity
_snake_case : Tuple = 0
_snake_case : Optional[Any] = 0
_snake_case : Optional[Any] = 0
_snake_case : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : Dict ):
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : List[str] , UpperCamelCase : T ):
'''simple docstring'''
return key in self.cache
def UpperCamelCase_ ( self : Any , UpperCamelCase : T ):
'''simple docstring'''
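# On a cache hit the node is detached and re-appended at the rear, keeping the
# list ordered from least- to most-recently used (eviction happens at the head).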
if key in self.cache:
self.hits += 1
_snake_case : DoubleLinkedListNode[T, U] = self.cache[key]
_snake_case : Dict = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(UpperCamelCase )
return node.val
self.miss += 1
return None
def UpperCamelCase_ ( self : int , UpperCamelCase : T , UpperCamelCase : U ):
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_snake_case : Dict = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(UpperCamelCase ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_snake_case : Any = DoubleLinkedListNode(UpperCamelCase , UpperCamelCase )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_snake_case : Optional[Any] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_snake_case : Optional[Any] = value
self.list.add(UpperCamelCase )
@classmethod
def UpperCamelCase_ ( cls : str , UpperCamelCase : int = 1_28 ):
'''simple docstring'''
def cache_decorator_inner(UpperCamelCase : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*UpperCamelCase : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
_snake_case : Union[str, Any] = LRUCache(UpperCamelCase )
_snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_snake_case : Any = func(*UpperCamelCase )
cls.decorator_function_to_instance_map[func].put(args[0] , UpperCamelCase )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(UpperCamelCase , 'cache_info' , UpperCamelCase ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # until we find a neighbor that we can move to
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
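# Metropolis criterion: a worsening move is still accepted with probability
# e^(change / T), so hot early iterations explore while cold late ones exploit.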
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 669 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase_ = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
lowerCAmelCase_ = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
lowerCAmelCase_ = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
lowerCAmelCase_ = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 669 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
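# Additive causal mask: positions above the diagonal get a large negative value
# so that, once added to the attention scores, each token attends only to
# itself and earlier tokens.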
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
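            # Convert the boolean mask to additive form (-10000.0 where masked), pad it to
            # cover the additional tokens, fold in the causal mask, and repeat it once per
            # attention head.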
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
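        # Un-normalize the predicted latents back into CLIP image-embedding space.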
        _snake_case : List[Any] = (UpperCamelCase * self.clip_std) + self.clip_mean
        return _snake_case
| 669 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : str =["""image_processor""", """tokenizer"""]
a_ : Union[str, Any] ="""BlipImageProcessor"""
a_ : Dict ="""AutoTokenizer"""
def __init__( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
super().__init__(UpperCamelCase , UpperCamelCase )
# add QFormer tokenizer
_snake_case : Optional[int] = qformer_tokenizer
def __call__( self : Dict , UpperCamelCase : ImageInput = None , UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[bool, str, PaddingStrategy] = False , UpperCamelCase : Union[bool, str, TruncationStrategy] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : int = 0 , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[str, TensorType]] = None , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , )
            encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids' )
            encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask' )
        if images is not None:
            image_encoding = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase )
            encoding.update(image_encoding )
        return encoding
def UpperCamelCase_ ( self : Dict , *UpperCamelCase : List[Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Any ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer.model_input_names
_snake_case : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] , **UpperCamelCase : str ):
'''simple docstring'''
        if os.path.isfile(UpperCamelCase ):
            raise ValueError(f"""Provided path ({UpperCamelCase}) should be a directory, not a file""" )
        os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
        _snake_case : Any = os.path.join(UpperCamelCase , 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(_snake_case )
        return super().save_pretrained(UpperCamelCase , **UpperCamelCase )
@classmethod
def UpperCamelCase_ ( cls : Tuple , UpperCamelCase : List[str] , **UpperCamelCase : List[str] ):
'''simple docstring'''
        _snake_case : int = AutoTokenizer.from_pretrained(UpperCamelCase , subfolder='qformer_tokenizer' )
        args = cls._get_arguments_from_pretrained(UpperCamelCase , **UpperCamelCase )
        args.append(_snake_case )
        return cls(*args )
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
    if not isinstance(lowerCAmelCase , int ):
        _snake_case : Union[str, Any] = f"""Input value of [number={lowerCAmelCase}] must be an integer"""
        raise TypeError(_snake_case )
    if lowerCAmelCase < 1:
        _snake_case : int = f"""Input value of [number={lowerCAmelCase}] must be > 0"""
        raise ValueError(_snake_case )
    _snake_case : int = 1
    for i in range(1 , lowerCAmelCase ):
        _snake_case *= 4 * i - 2
        _snake_case //= i + 1
    return _snake_case
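# Sanity check: successive inputs yield the Catalan numbers 1, 1, 2, 5, 14, 42, ...
# (input n returns the (n - 1)-th Catalan number via C *= 4i - 2; C //= i + 1).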
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase : str , UpperCamelCase : Any=13 , UpperCamelCase : int=7 , UpperCamelCase : List[str]=True , UpperCamelCase : str=True , UpperCamelCase : int=True , UpperCamelCase : Any=True , UpperCamelCase : Optional[int]=99 , UpperCamelCase : List[str]=16 , UpperCamelCase : Any=36 , UpperCamelCase : int=6 , UpperCamelCase : Optional[Any]=6 , UpperCamelCase : Union[str, Any]=6 , UpperCamelCase : int=37 , UpperCamelCase : Any="gelu" , UpperCamelCase : str=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=5_12 , UpperCamelCase : str=16 , UpperCamelCase : Dict=2 , UpperCamelCase : Union[str, Any]=0.02 , UpperCamelCase : Any=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : Any=None , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Dict = batch_size
_snake_case : List[Any] = seq_length
_snake_case : Optional[int] = is_training
_snake_case : Optional[Any] = use_input_mask
_snake_case : Any = use_token_type_ids
_snake_case : List[str] = use_labels
_snake_case : str = vocab_size
_snake_case : Dict = embedding_size
_snake_case : List[Any] = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Any = num_hidden_groups
_snake_case : Tuple = num_attention_heads
_snake_case : Tuple = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : Any = attention_probs_dropout_prob
_snake_case : List[Any] = max_position_embeddings
_snake_case : Optional[Any] = type_vocab_size
_snake_case : Tuple = type_sequence_label_size
_snake_case : Optional[Any] = initializer_range
_snake_case : Any = num_labels
_snake_case : str = num_choices
_snake_case : Union[str, Any] = scope
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : Tuple = None
if self.use_input_mask:
_snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : Any = None
if self.use_token_type_ids:
_snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case : str = None
_snake_case : int = None
_snake_case : Optional[int] = None
if self.use_labels:
_snake_case : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_snake_case : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = AlbertModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Optional[Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )
_snake_case : Union[str, Any] = model(UpperCamelCase , token_type_ids=UpperCamelCase )
_snake_case : Union[str, Any] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : Union[str, Any] = AlbertForPreTraining(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : int = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , sentence_order_label=UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : Any , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = AlbertForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Union[str, Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : List[str] = AlbertForQuestionAnswering(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Tuple = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_snake_case : Any = self.num_labels
_snake_case : Optional[Any] = AlbertForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : int = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : str , UpperCamelCase : List[str] , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : Dict = self.num_labels
_snake_case : Optional[Any] = AlbertForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Tuple = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = self.num_choices
_snake_case : Dict = AlbertForMultipleChoice(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
_snake_case : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : Optional[int] = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        _snake_case : str = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = _snake_case
        _snake_case : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, _snake_case
@require_torch
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Union[str, Any] =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : Dict =(
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : List[str] =True
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Tuple=False ):
'''simple docstring'''
        inputs_dict = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
        if return_labels:
            if model_class in get_values(UpperCamelCase ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase )
                inputs_dict['sentence_order_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase )
return inputs_dict
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : str = AlbertModelTester(self )
_snake_case : Optional[int] = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case : int = type
self.model_tester.create_and_check_model(*UpperCamelCase )
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : int = AlbertModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Optional[Any] = AlbertModel.from_pretrained('albert-base-v2' )
_snake_case : Optional[int] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
_snake_case : Dict = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_snake_case : Optional[Any] = model(UpperCamelCase , attention_mask=UpperCamelCase )[0]
_snake_case : Union[str, Any] = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , UpperCamelCase )
_snake_case : List[str] = torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1e-4 ) )
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
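        # Keep the serialized normalizer in sync with the requested options: if any of
        # lowercase / strip_accents / handle_chinese_chars differ, rebuild the normalizer.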
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[int] = do_lower_case
_snake_case : Dict = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**UpperCamelCase )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ):
'''simple docstring'''
        _snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            _snake_case += token_ids_a + [self.sep_token_id]
        return _snake_case
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
| 669 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] ="""xlm-roberta"""
def __init__( self : str , UpperCamelCase : str=3_05_22 , UpperCamelCase : str=7_68 , UpperCamelCase : List[str]=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : List[str]=30_72 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=5_12 , UpperCamelCase : Any=2 , UpperCamelCase : int=0.02 , UpperCamelCase : Union[str, Any]=1e-1_2 , UpperCamelCase : Optional[int]=1 , UpperCamelCase : Optional[Any]=0 , UpperCamelCase : List[str]=2 , UpperCamelCase : List[str]="absolute" , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=None , **UpperCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : List[Any] = vocab_size
_snake_case : Dict = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Tuple = num_attention_heads
_snake_case : Union[str, Any] = hidden_act
_snake_case : str = intermediate_size
_snake_case : Dict = hidden_dropout_prob
_snake_case : int = attention_probs_dropout_prob
_snake_case : Optional[Any] = max_position_embeddings
_snake_case : Union[str, Any] = type_vocab_size
_snake_case : Optional[Any] = initializer_range
_snake_case : Any = layer_norm_eps
_snake_case : Union[str, Any] = position_embedding_type
_snake_case : Tuple = use_cache
_snake_case : int = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 669 |
from __future__ import annotations
from random import random
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int | None = None ):
'''simple docstring'''
_snake_case : str = value
_snake_case : List[Any] = random()
_snake_case : Node | None = None
_snake_case : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
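# A treap is a binary search tree keyed on `value` that is simultaneously a min-heap
# on the random `prior`; `split` and `merge` below preserve both invariants.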
def split(root: Node | None , value: int )-> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left , root.left = split(root.left , value )
            return left, root
        else:
            root.right , right = split(root.right , value )
            return root, right
def merge(left: Node | None , right: Node | None )-> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert(root: Node | None , value: int )-> Node | None:
    node = Node(value )
    left , right = split(root , value )
    return merge(merge(left , node ) , right )
def erase(root: Node | None , value: int )-> Node | None:
    left , right = split(root , value - 1 )
    _ , right = split(right , value )
    return merge(left , right )
def inorder(root: Node | None )-> None:
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end=',' )
        inorder(root.right )
def interact_treap(root: Node | None , args: str )-> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print('Unknown command' )
    return root
def main()-> None:
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        '- value to erase all nodes with value. \'q\' to quit. ' )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print('good bye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 669 | 1 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input , model , tokenizer , topk=5 ):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('<mask>' ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values , indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = ' '.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ):
        predicted_token = predicted_token_bpe.replace('\u2581' , ' ' )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(' {0}'.format(masked_token ) , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
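# Usage (mirrors the driver below): fill_mask('Le camembert est <mask> :)', model, tokenizer, topk=3)
# returns (filled_sentence, probability, predicted_token) triples, most probable first.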
lowerCAmelCase_ = CamembertTokenizer.from_pretrained("""camembert-base""")
lowerCAmelCase_ = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
lowerCAmelCase_ = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 669 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def lowerCamelCase_ ( lowerCAmelCase: str = lowerCAmelCase_ )-> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , lowerCAmelCase[i : i + 13] ) )
        for i in range(len(lowerCAmelCase ) - 12 ) )
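# Each 13-character window is reduced pairwise: e.g. over "234" the lambda computes
# str(2 * 3) = "6", then str(6 * 4) = "24", so int(...) gives the digit product 24.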
if __name__ == "__main__":
    print(F"""{lowerCamelCase_() = }""")
| 669 | 1 |
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: Image , level: float )-> Image:
    def brightness(c: int ) -> float:
        return 1_28 + level + (c - 1_28)
    if not -2_5_5.0 <= level <= 2_5_5.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return lowerCAmelCase.point(brightness )
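# Note: 128 + level + (c - 128) simplifies to c + level, so the transform shifts
# every pixel channel uniformly by `level`.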
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness by 100
        lowerCAmelCase_ = lowerCamelCase_(img, 100)
        lowerCAmelCase_.save("""image_data/lena_brightness.png""", format="""png""")
| 669 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
    _snake_case : Optional[Any] = Dataset.from_dict(_snake_case )
    return _snake_case
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        _snake_case : Union[str, Any] = get_dataset()
        _snake_case : Tuple = make_duplicate_clusters(_snake_case , 0.85 )
        self.assertEqual(len(_snake_case[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        _snake_case : List[str] = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(_snake_case )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , True )
| 669 | 1 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Any =GPTSwaTokenizer
a_ : Dict =False
a_ : Any =True
a_ : Optional[int] =False
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        _snake_case : Union[str, Any] = GPTSwaTokenizer(lowerCAmelCase_ , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
        _snake_case.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : str = 'This is a test'
_snake_case : Tuple = 'This is a test'
return input_text, output_text
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = '<s>'
_snake_case : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(UpperCamelCase ) , 20_00 )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : List[Any] = GPTSwaTokenizer(UpperCamelCase )
_snake_case : Optional[int] = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
_snake_case : Dict = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
UpperCamelCase , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
_snake_case : Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase )
self.assertListEqual(
UpperCamelCase , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
_snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(UpperCamelCase )
# fmt: off
self.assertListEqual(
UpperCamelCase , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = GPTSwaTokenizer(UpperCamelCase )
_snake_case : Tuple = ['This is a test', 'I was born in 92000, and this is falsé.']
_snake_case : Dict = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(UpperCamelCase , UpperCamelCase ):
self.assertListEqual(tokenizer.encode_fast(UpperCamelCase ) , UpperCamelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(UpperCamelCase , UpperCamelCase ):
self.assertEqual(tokenizer.decode_fast(UpperCamelCase ) , UpperCamelCase )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : str = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
_snake_case : int = {'input_ids': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='AI-Sweden/gpt-sw3-126m' , sequences=UpperCamelCase , )
| 669 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
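        # Tokenize the text and/or preprocess the images; when both are given, the pixel
        # values are merged into the text encoding so a single BatchEncoding carries both.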
        if text is not None:
            encoding = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
        if images is not None:
            image_features = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCAmelCase_ = {
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 669 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
        return [bytes(CONTENT , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 669 | 1 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
        return [bytes(CONTENT , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 669 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : int ="""roberta"""
def __init__( self : int , UpperCamelCase : Tuple=5_02_65 , UpperCamelCase : Any=7_68 , UpperCamelCase : List[Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : Dict=30_72 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : str=1 , UpperCamelCase : int=0 , UpperCamelCase : Any=2 , UpperCamelCase : int="absolute" , UpperCamelCase : int=True , UpperCamelCase : List[Any]=None , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Any = vocab_size
_snake_case : List[str] = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Dict = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Union[str, Any] = use_cache
_snake_case : str = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
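# Hedged usage sketch: the two classes above mirror the canonical
# `RobertaConfig` and its ONNX export config; those names are assumptions,
# since the identifiers in this file are obfuscated.
#
#   from transformers import RobertaConfig
#   config = RobertaConfig(num_hidden_layers=6)  # smaller than the 12-layer default
#   assert config.model_type == "roberta"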
| 669 | 1 |
import pytest
lowerCAmelCase_ = """__dummy_dataset1__"""
lowerCAmelCase_ = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def lowerCamelCase_ ( )-> List[Any]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowerCamelCase_ ( )-> Dict:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: Any , lowerCAmelCase: Any )-> Optional[int]:
_snake_case : List[Any] = dataset_loading_script_name
_snake_case : List[Any] = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=lowerCAmelCase )
_snake_case : int = script_dir / F"""{script_name}.py"""
with open(lowerCAmelCase , 'w' ) as f:
f.write(lowerCAmelCase )
return str(lowerCAmelCase )
| 669 |
from random import randint, random
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: bool = False , lowerCAmelCase: bool = False , lowerCAmelCase: int = 5 , )-> list:
_snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any car
_snake_case : List[str] = 0
_snake_case : List[str] = max(lowerCAmelCase , 0 )
while i < number_of_cells:
_snake_case : Optional[Any] = (
randint(0 , lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int )-> int:
_snake_case : Dict = 0
_snake_case : Optional[Any] = highway_now[car_index + 1 :]
for cell in range(len(lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(lowerCAmelCase , -1 )
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : List[Any] = len(lowerCAmelCase )
    # Before calculations, the highway is empty
_snake_case : List[Any] = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_snake_case : int = min(highway_now[car_index] + 1 , lowerCAmelCase )
            # Number of empty cells before the next car
_snake_case : Tuple = get_distance(lowerCAmelCase , lowerCAmelCase ) - 1
# We can't have the car causing an accident
_snake_case : Union[str, Any] = min(next_highway[car_index] , lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
_snake_case : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : Dict = len(highway[0] )
for i in range(lowerCAmelCase ):
_snake_case : Any = update(highway[i] , lowerCAmelCase , lowerCAmelCase )
_snake_case : Tuple = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
_snake_case : Union[str, Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_snake_case : Union[str, Any] = (car_index + speed) % number_of_cells
# Commit the change of position
_snake_case : Tuple = speed
highway.append(lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
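# Minimal self-contained sketch of one Nagel-Schreckenberg update step, i.e.
# the three rules applied by the `update` routine above (accelerate, brake to
# the gap, move); random braking is omitted so the outcome is deterministic,
# and all names below are illustrative only.
road = [1, -1, -1, 2, -1, -1]  # cell speeds; -1 marks an empty cell
vmax = 5
new_road = [-1] * len(road)
for idx, v in enumerate(road):
    if v == -1:
        continue
    v = min(v + 1, vmax)  # rule 1: accelerate
    gap = next(d for d in range(1, len(road) + 1) if road[(idx + d) % len(road)] != -1) - 1
    v = min(v, gap)  # rule 2: never drive into the car ahead
    new_road[(idx + v) % len(road)] = v  # rule 3: move (the track wraps around)
assert new_road == [-1, -1, 2, -1, -1, 2]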
| 669 | 1 |
import math
def lowerCamelCase_ ( lowerCAmelCase: int )-> list[int]:
_snake_case : str = []
_snake_case : Optional[int] = 2
_snake_case : int = int(math.sqrt(lowerCAmelCase ) ) # Size of every segment
_snake_case : List[Any] = [True] * (end + 1)
_snake_case : Dict = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase )
for i in range(start * start , end + 1 , lowerCAmelCase ):
_snake_case : int = False
start += 1
prime += in_prime
_snake_case : Any = end + 1
_snake_case : int = min(2 * end , lowerCAmelCase )
while low <= n:
_snake_case : List[str] = [True] * (high - low + 1)
for each in in_prime:
_snake_case : List[Any] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase , high + 1 , lowerCAmelCase ):
_snake_case : Tuple = False
for j in range(len(lowerCAmelCase ) ):
if temp[j] is True:
prime.append(j + low )
_snake_case : List[Any] = high + 1
_snake_case : Tuple = min(high + end , lowerCAmelCase )
return prime
print(sieve(10**6))
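# Hedged cross-check for the segmented sieve above: `sieve` (the name used in
# the final print) should agree with naive trial division for small bounds.
def _is_prime(k: int) -> bool:
    if k < 2:
        return False
    return all(k % d for d in range(2, int(k**0.5) + 1))

assert [p for p in range(31) if _is_prime(p)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]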
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
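# Hedged usage sketch: the overridden __call__ above corresponds to
# `RealmTokenizerFast.batch_encode_candidates` in the upstream module; the
# method name and the output shape noted below are assumptions.
#
#   from transformers import RealmTokenizerFast
#   tok = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tok.batch_encode_candidates([["hello world", "nice day"]], max_length=10, return_tensors="np")
#   # batch["input_ids"]: (num_questions, num_candidates, max_length)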
| 669 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[int] ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Optional[int]=0 ):
'''simple docstring'''
_snake_case : str = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCamelCase ) )
_snake_case : Optional[Any] = np.random.RandomState(UpperCamelCase )
_snake_case : Optional[int] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Optional[Any] = self.get_dummy_inputs()
_snake_case : Optional[int] = pipe(**UpperCamelCase ).images
_snake_case : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
_snake_case : Optional[Any] = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_snake_case : Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Dict = self.get_dummy_inputs()
_snake_case : Optional[int] = pipe(**UpperCamelCase ).images
_snake_case : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_snake_case : Union[str, Any] = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_snake_case : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase )
# warmup pass to apply optimizations
_snake_case : int = pipe(**self.get_dummy_inputs() )
_snake_case : List[Any] = self.get_dummy_inputs()
_snake_case : Dict = pipe(**UpperCamelCase ).images
_snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_snake_case : List[Any] = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_snake_case : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : List[Any] = self.get_dummy_inputs()
_snake_case : List[str] = pipe(**UpperCamelCase ).images
_snake_case : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_snake_case : Optional[Any] = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_snake_case : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Any = self.get_dummy_inputs()
_snake_case : List[Any] = pipe(**UpperCamelCase ).images
_snake_case : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_snake_case : Any = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
_snake_case : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Union[str, Any] = self.get_dummy_inputs()
_snake_case : Optional[int] = pipe(**UpperCamelCase ).images
_snake_case : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_snake_case : Optional[Any] = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : int = ort.SessionOptions()
_snake_case : str = False
return options
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
_snake_case : List[Any] = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
_snake_case : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Any = 'A fantasy landscape, trending on artstation'
_snake_case : int = np.random.RandomState(0 )
_snake_case : Optional[int] = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase , output_type='np' , )
_snake_case : int = output.images
_snake_case : Any = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
_snake_case : List[Any] = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
_snake_case : Any = init_image.resize((7_68, 5_12) )
_snake_case : Optional[int] = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
_snake_case : List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase )
_snake_case : Optional[int] = 'A fantasy landscape, trending on artstation'
_snake_case : Any = np.random.RandomState(0 )
_snake_case : List[str] = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCamelCase , output_type='np' , )
_snake_case : int = output.images
_snake_case : Tuple = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
_snake_case : Union[str, Any] = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
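# Example invocation (hedged: the script filename is an assumption, and valid
# --tokenizer_name values are the keys of SLOW_TO_FAST_CONVERTERS, e.g.
# "BertTokenizer"):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers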
| 669 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[Any] ="""perceiver"""
def __init__( self : Dict , UpperCamelCase : int=2_56 , UpperCamelCase : str=12_80 , UpperCamelCase : List[str]=7_68 , UpperCamelCase : Tuple=1 , UpperCamelCase : Optional[int]=26 , UpperCamelCase : Tuple=8 , UpperCamelCase : Dict=8 , UpperCamelCase : Dict=None , UpperCamelCase : Dict=None , UpperCamelCase : List[Any]="kv" , UpperCamelCase : List[Any]=1 , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : int=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : List[str]=True , UpperCamelCase : Any=2_62 , UpperCamelCase : Any=20_48 , UpperCamelCase : Optional[Any]=56 , UpperCamelCase : Any=[3_68, 4_96] , UpperCamelCase : List[Any]=16 , UpperCamelCase : List[str]=19_20 , UpperCamelCase : int=16 , UpperCamelCase : Any=[1, 16, 2_24, 2_24] , **UpperCamelCase : Tuple , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Optional[int] = num_latents
_snake_case : str = d_latents
_snake_case : Dict = d_model
_snake_case : Optional[int] = num_blocks
_snake_case : int = num_self_attends_per_block
_snake_case : Optional[int] = num_self_attention_heads
_snake_case : Tuple = num_cross_attention_heads
_snake_case : Tuple = qk_channels
_snake_case : List[str] = v_channels
_snake_case : int = cross_attention_shape_for_attention
_snake_case : Union[str, Any] = self_attention_widening_factor
_snake_case : Tuple = cross_attention_widening_factor
_snake_case : List[str] = hidden_act
_snake_case : str = attention_probs_dropout_prob
_snake_case : Optional[Any] = initializer_range
_snake_case : Optional[Any] = layer_norm_eps
_snake_case : Optional[int] = use_query_residual
# masked language modeling attributes
_snake_case : Tuple = vocab_size
_snake_case : Optional[Any] = max_position_embeddings
# image classification attributes
_snake_case : Union[str, Any] = image_size
# flow attributes
_snake_case : str = train_size
# multimodal autoencoding attributes
_snake_case : int = num_frames
_snake_case : List[Any] = audio_samples_per_frame
_snake_case : int = samples_per_patch
_snake_case : List[str] = output_shape
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : int = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Any = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return 1e-4
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , UpperCamelCase : int = 3 , UpperCamelCase : int = 40 , UpperCamelCase : int = 40 , ):
'''simple docstring'''
if isinstance(UpperCamelCase , UpperCamelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_snake_case : Tuple = compute_effective_axis_dimension(
UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_snake_case : List[str] = preprocessor.num_special_tokens_to_add(UpperCamelCase )
_snake_case : Union[str, Any] = compute_effective_axis_dimension(
UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
_snake_case : List[str] = [' '.join(['a'] ) * seq_length] * batch_size
_snake_case : List[Any] = dict(preprocessor(UpperCamelCase , return_tensors=UpperCamelCase ) )
_snake_case : List[str] = inputs.pop('input_ids' )
return inputs
elif isinstance(UpperCamelCase , UpperCamelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_snake_case : Optional[int] = compute_effective_axis_dimension(UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
_snake_case : Optional[int] = self._generate_dummy_images(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
_snake_case : Any = dict(preprocessor(images=UpperCamelCase , return_tensors=UpperCamelCase ) )
_snake_case : Optional[Any] = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
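# Hedged usage sketch (class names follow the canonical Perceiver modules
# that the obfuscated classes above mirror; treat them as assumptions):
#
#   from transformers import PerceiverConfig, PerceiverTokenizer
#   from transformers.models.perceiver.configuration_perceiver import PerceiverOnnxConfig
#   onnx_config = PerceiverOnnxConfig(PerceiverConfig())
#   dummy = onnx_config.generate_dummy_inputs(PerceiverTokenizer())  # keys: "inputs", "attention_mask"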
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return "".join([hex(lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(lowerCAmelCase )] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCAmelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
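# Self-contained round-trip check of the same base16 scheme using only the
# standard library (the encode/decode functions above are obfuscated, so the
# equivalent built-ins are called instead):
data = b"Hello"
encoded = data.hex().upper()  # "48656C6C6F"
assert bytes.fromhex(encoded) == data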
| 669 | 1 |
def lowerCamelCase_ ( lowerCAmelCase: int = 1_00 )-> int:
_snake_case : str = n * (n + 1) * (2 * n + 1) / 6
_snake_case : int = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCAmelCase_ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Any , lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Tuple , lowerCAmelCase: Tuple )-> Union[str, Any]:
for attribute in key.split('.' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_snake_case : Union[str, Any] = 'lm_head'
_snake_case : str = getattr(lowerCAmelCase , lowerCAmelCase )
if weight_type is not None:
_snake_case : Tuple = getattr(lowerCAmelCase , lowerCAmelCase ).shape
else:
_snake_case : List[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_snake_case : List[Any] = value
elif weight_type == "weight_g":
_snake_case : List[Any] = value
elif weight_type == "weight_v":
_snake_case : str = value
elif weight_type == "bias":
_snake_case : List[Any] = value
else:
_snake_case : Tuple = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_ ( lowerCAmelCase: List[str] , lowerCAmelCase: List[str] , lowerCAmelCase: Any )-> int:
_snake_case : str = []
_snake_case : str = fairseq_model.state_dict()
_snake_case : Tuple = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_snake_case : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
_snake_case : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
_snake_case : Any = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_snake_case : Optional[int] = True
if "*" in mapped_key:
_snake_case : List[Any] = name.split(lowerCAmelCase )[0].split('.' )[-2]
_snake_case : Optional[Any] = mapped_key.replace('*' , lowerCAmelCase )
if "weight_g" in name:
_snake_case : Optional[Any] = 'weight_g'
elif "weight_v" in name:
_snake_case : Dict = 'weight_v'
elif "bias" in name:
_snake_case : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_snake_case : List[str] = 'weight'
else:
_snake_case : Optional[int] = None
set_recursively(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: str , lowerCAmelCase: str , lowerCAmelCase: Tuple , lowerCAmelCase: Optional[Any] )-> Union[str, Any]:
_snake_case : Optional[Any] = full_name.split('conv_layers.' )[-1]
_snake_case : Dict = name.split('.' )
_snake_case : List[Any] = int(items[0] )
_snake_case : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_snake_case : List[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_snake_case : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_snake_case : Optional[int] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_snake_case : Optional[int] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase )
@torch.no_grad()
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Union[str, Any]=None , lowerCAmelCase: str=None , lowerCAmelCase: Any=True )-> int:
if config_path is not None:
_snake_case : Any = UniSpeechConfig.from_pretrained(lowerCAmelCase )
else:
_snake_case : Union[str, Any] = UniSpeechConfig()
if is_finetuned:
if dict_path:
_snake_case : Any = Dictionary.load_from_json(lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_snake_case : Union[str, Any] = target_dict.pad_index
_snake_case : str = target_dict.bos_index
_snake_case : Dict = target_dict.eos_index
_snake_case : Optional[Any] = len(target_dict.symbols )
_snake_case : Tuple = os.path.join(lowerCAmelCase , 'vocab.json' )
if not os.path.isdir(lowerCAmelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCAmelCase ) )
return
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
_snake_case : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
_snake_case : Union[str, Any] = 42
_snake_case : List[str] = 43
with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowerCAmelCase , lowerCAmelCase )
_snake_case : Optional[Any] = WavaVecaPhonemeCTCTokenizer(
lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCAmelCase , )
_snake_case : List[str] = True if config.feat_extract_norm == 'layer' else False
_snake_case : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCAmelCase , return_attention_mask=lowerCAmelCase , )
_snake_case : Union[str, Any] = WavaVecaProcessor(feature_extractor=lowerCAmelCase , tokenizer=lowerCAmelCase )
processor.save_pretrained(lowerCAmelCase )
_snake_case : Dict = UniSpeechForCTC(lowerCAmelCase )
else:
_snake_case : Dict = UniSpeechForPreTraining(lowerCAmelCase )
if is_finetuned:
_snake_case , _snake_case , _snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
else:
_snake_case , _snake_case , _snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_snake_case : Tuple = model[0].eval()
recursively_load_weights(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
hf_unispeech.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCAmelCase_ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
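# Example invocation (hedged: the script filename and every path below are
# placeholders, not taken from the source):
#
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./unispeech.pt \
#       --dict_path ./dict.ltr.txt \
#       --config_path ./config.json \
#       --pytorch_dump_folder_path ./unispeech-hf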
| 669 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(UpperCamelCase ) for k, v in self.__dict__.items()} )
| 669 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCAmelCase_ = Mapping[str, np.ndarray]
lowerCAmelCase_ = Mapping[str, Any] # Is a nested dict.
lowerCAmelCase_ = 0.01
@dataclasses.dataclass(frozen=UpperCAmelCase_ )
class _lowerCAmelCase :
'''simple docstring'''
a_ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
a_ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
a_ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
a_ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
a_ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
a_ : Optional[np.ndarray] =None
# Optional remark about the protein. Included as a comment in output PDB
# files
a_ : Optional[str] =None
# Templates used to generate this protein (prediction-only)
a_ : Optional[Sequence[str]] =None
# Chain corresponding to each parent
a_ : Optional[Sequence[int]] =None
def lowerCamelCase_ ( lowerCAmelCase: str )-> Protein:
_snake_case : List[Any] = R'(\[[A-Z]+\]\n)'
_snake_case : List[str] = [tag.strip() for tag in re.split(lowerCAmelCase , lowerCAmelCase ) if len(lowerCAmelCase ) > 0]
_snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
_snake_case : List[str] = ["N", "CA", "C"]
_snake_case : List[Any] = None
_snake_case : Tuple = None
_snake_case : Dict = None
for g in groups:
if "[PRIMARY]" == g[0]:
_snake_case : Optional[Any] = g[1][0].strip()
for i in range(len(lowerCAmelCase ) ):
if seq[i] not in residue_constants.restypes:
_snake_case : int = 'X' # FIXME: strings are immutable
_snake_case : Optional[int] = np.array(
[residue_constants.restype_order.get(lowerCAmelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
_snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(lowerCAmelCase , g[1][axis].split() ) ) )
_snake_case : Optional[int] = np.array(lowerCAmelCase )
_snake_case : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(lowerCAmelCase ):
_snake_case : List[Any] = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
_snake_case : List[Any] = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
_snake_case : Union[str, Any] = np.zeros(
(
len(lowerCAmelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(lowerCAmelCase ):
_snake_case : Optional[Any] = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=lowerCAmelCase , atom_mask=lowerCAmelCase , aatype=lowerCAmelCase , residue_index=np.arange(len(lowerCAmelCase ) ) , b_factors=lowerCAmelCase , )
def lowerCamelCase_ ( lowerCAmelCase: Protein , lowerCAmelCase: int = 0 )-> List[str]:
_snake_case : List[str] = []
_snake_case : Dict = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
_snake_case : str = prot.parents
_snake_case : Optional[Any] = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_snake_case : Union[str, Any] = [p for i, p in zip(lowerCAmelCase , lowerCAmelCase ) if i == chain_id]
if parents is None or len(lowerCAmelCase ) == 0:
_snake_case : Optional[int] = ['N/A']
pdb_headers.append(F"""PARENT {' '.join(lowerCAmelCase )}""" )
return pdb_headers
def lowerCamelCase_ ( lowerCAmelCase: Protein , lowerCAmelCase: str )-> str:
_snake_case : List[str] = []
_snake_case : Union[str, Any] = pdb_str.split('\n' )
_snake_case : Any = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
_snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
_snake_case : Any = []
if prot.parents_chain_index is not None:
_snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(lowerCAmelCase ) , [] )
parent_dict[str(lowerCAmelCase )].append(lowerCAmelCase )
_snake_case : Any = max([int(lowerCAmelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
_snake_case : List[Any] = parent_dict.get(str(lowerCAmelCase ) , ['N/A'] )
parents_per_chain.append(lowerCAmelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
_snake_case : Optional[Any] = [['N/A']]
def make_parent_line(lowerCAmelCase: Sequence[str] ) -> str:
return F"""PARENT {' '.join(lowerCAmelCase )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
_snake_case : Optional[int] = 0
for i, l in enumerate(lowerCAmelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(lowerCAmelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(lowerCAmelCase ):
_snake_case : int = parents_per_chain[chain_counter]
else:
_snake_case : Dict = ['N/A']
out_pdb_lines.append(make_parent_line(lowerCAmelCase ) )
return "\n".join(lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Protein )-> str:
_snake_case : int = residue_constants.restypes + ['X']
def res_atoa(lowerCAmelCase: int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
_snake_case : str = residue_constants.atom_types
_snake_case : List[str] = []
_snake_case : Any = prot.atom_mask
_snake_case : Optional[int] = prot.aatype
_snake_case : str = prot.atom_positions
_snake_case : Tuple = prot.residue_index.astype(np.intaa )
_snake_case : List[str] = prot.b_factors
_snake_case : Dict = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
_snake_case : Any = get_pdb_headers(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
pdb_lines.extend(lowerCAmelCase )
_snake_case : Optional[Any] = aatype.shape[0]
_snake_case : Optional[Any] = 1
_snake_case : Union[str, Any] = 0
_snake_case : int = string.ascii_uppercase
_snake_case : str = None
# Add all atom sites.
for i in range(lowerCAmelCase ):
_snake_case : Dict = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(lowerCAmelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
_snake_case : Optional[Any] = 'ATOM'
_snake_case : Tuple = atom_name if len(lowerCAmelCase ) == 4 else F""" {atom_name}"""
_snake_case : Dict = ''
_snake_case : Any = ''
_snake_case : int = 1.0_0
_snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
_snake_case : List[str] = ''
_snake_case : List[Any] = 'A'
if chain_index is not None:
_snake_case : List[Any] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
_snake_case : str = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(lowerCAmelCase )
atom_index += 1
_snake_case : Dict = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
_snake_case : str = True
_snake_case : Optional[Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
_snake_case : int = 'TER'
_snake_case : List[str] = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(lowerCAmelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(lowerCAmelCase , lowerCAmelCase ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Protein )-> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowerCamelCase_ ( lowerCAmelCase: FeatureDict , lowerCAmelCase: ModelOutput , lowerCAmelCase: Optional[np.ndarray] = None , lowerCAmelCase: Optional[np.ndarray] = None , lowerCAmelCase: Optional[str] = None , lowerCAmelCase: Optional[Sequence[str]] = None , lowerCAmelCase: Optional[Sequence[int]] = None , )-> Protein:
return Protein(
aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=lowerCAmelCase , remark=lowerCAmelCase , parents=lowerCAmelCase , parents_chain_index=lowerCAmelCase , )
| 669 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = tokenizer
_snake_case : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase )
_snake_case : int = TFGPTaLMHeadModel.from_config(UpperCamelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer(UpperCamelCase )
_snake_case : Union[str, Any] = tokenized['input_ids'].to_tensor()
_snake_case : Any = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_snake_case : Tuple = self.model(input_ids=UpperCamelCase , attention_mask=UpperCamelCase )['logits']
return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
super().setUp()
_snake_case : Optional[int] = [GPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_snake_case : Tuple = [TFGPTaTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_snake_case : Any = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_snake_case : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_snake_case : Optional[int] = tokenizer([test_inputs] , return_tensors='tf' )
_snake_case : Tuple = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_snake_case : Dict = python_outputs[key].numpy()
_snake_case : Optional[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : str = tf.function(UpperCamelCase )
for test_inputs in self.test_sentences:
_snake_case : int = tf.constant(UpperCamelCase )
_snake_case : Tuple = compiled_tokenizer(UpperCamelCase )
_snake_case : int = tf_tokenizer(UpperCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Union[str, Any] = ModelToSave(tokenizer=UpperCamelCase )
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Tuple = model.serving(UpperCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_snake_case : str = Path(UpperCamelCase ) / 'saved.model'
tf.saved_model.save(UpperCamelCase , UpperCamelCase , signatures={'serving_default': model.serving} )
_snake_case : Optional[int] = tf.saved_model.load(UpperCamelCase )
_snake_case : List[str] = loaded_model.signatures['serving_default'](UpperCamelCase )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : Any = tf_tokenizer(UpperCamelCase ) # Build model with some sample inputs
_snake_case : Optional[Any] = tf_tokenizer.get_config()
_snake_case : Tuple = TFGPTaTokenizer.from_config(UpperCamelCase )
_snake_case : Optional[Any] = model_from_config(UpperCamelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_snake_case : Union[str, Any] = 12_31_23
for max_length in [3, 5, 10_24]:
_snake_case : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_snake_case : List[str] = tf_tokenizer(UpperCamelCase , max_length=UpperCamelCase )
_snake_case : int = out['input_ids'].numpy().shape[1]
assert out_length == max_length
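# Standalone sketch (not exercised by the tests above) of the ragged -> dense
# pattern used in ModelToSave.serving: the in-graph tokenizer yields a
# RaggedTensor, which is densified (0-padded) before deriving an attention mask.
def _ragged_to_dense_sketch():
    ragged = tf.ragged.constant([[1, 2, 3], [4, 5]])
    dense = ragged.to_tensor()  # [[1, 2, 3], [4, 5, 0]]
    mask = tf.cast(dense > 0, tf.int32)  # 1 for real tokens, 0 for padding
    return dense, mask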
| 669 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : List[str] =PegasusTokenizer
a_ : List[Any] =PegasusTokenizerFast
a_ : List[Any] =True
a_ : List[str] =True
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case : str = PegasusTokenizer(UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def UpperCamelCase_ ( self : int , **UpperCamelCase : int ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
return ("This is a test", "This is a test")
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = '</s>'
_snake_case : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(UpperCamelCase ) , 11_03 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_snake_case : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_snake_case : List[str] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
_snake_case : Dict = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase , add_special_tokens=UpperCamelCase ).input_ids[0]
_snake_case : str = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase , add_special_tokens=UpperCamelCase ).input_ids[0]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_snake_case : List[str] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
_snake_case : Any = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
_snake_case : Optional[int] = tokenizer([raw_input_str] , return_tensors=UpperCamelCase ).input_ids[0]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Dict = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
_snake_case : int = 'To ensure a smooth flow of bank resolutions.'
_snake_case : str = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
_snake_case : Optional[int] = tokenizer([raw_input_str] , return_tensors=UpperCamelCase ).input_ids[0]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : List[str] = ['This is going to be way too long.' * 1_50, 'short example']
_snake_case : Union[str, Any] = ['not super long but more than 5 tokens', 'tiny']
_snake_case : Dict = self._large_tokenizer(UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors='pt' )
_snake_case : Dict = self._large_tokenizer(
text_target=UpperCamelCase , max_length=5 , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors='pt' )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCamelCase ) == 2 # input_ids, attention_mask.
@slow
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : str = {'input_ids': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : List[Any] =PegasusTokenizer
a_ : Optional[Any] =PegasusTokenizerFast
a_ : Tuple =True
a_ : int =True
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case : str = PegasusTokenizer(UpperCamelCase , offset=0 , mask_token_sent=UpperCamelCase , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def UpperCamelCase_ ( self : Dict , **UpperCamelCase : List[str] ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Any ):
'''simple docstring'''
return ("This is a test", "This is a test")
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_snake_case : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_snake_case : str = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
_snake_case : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase , add_special_tokens=UpperCamelCase ).input_ids[0]
_snake_case : List[Any] = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase , add_special_tokens=UpperCamelCase ).input_ids[0]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@require_torch
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = ['This is going to be way too long.' * 10_00, 'short example']
_snake_case : Any = ['not super long but more than 5 tokens', 'tiny']
_snake_case : Optional[Any] = self._large_tokenizer(UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors='pt' )
_snake_case : Dict = self._large_tokenizer(
text_target=UpperCamelCase , max_length=5 , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors='pt' )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCamelCase ) == 2 # input_ids, attention_mask.
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : List[Any] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
_snake_case : Optional[Any] = self._large_tokenizer(UpperCamelCase ).input_ids
self.assertListEqual(
UpperCamelCase , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
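# Id layout implied by the assertions above (a reading aid, not an API contract):
# PegasusTokenizer reserves ids below `offset` (default 103) for specials --
# pad=0, eos=1, mask_1=2 (sentence mask), mask_2=3 (word mask), then
# <unk_2>..<unk_102> -- and shifts every SentencePiece id p to p + offset, which
# is why the SentencePiece <unk> (piece id 2) surfaces as id 105 above.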
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> list:
_snake_case : List[Any] = int(lowerCAmelCase )
if n_element < 1:
        _snake_case : int = ValueError('n_element should be a positive number' )
raise my_error
_snake_case : Union[str, Any] = [1]
_snake_case , _snake_case , _snake_case : Any = (0, 0, 0)
_snake_case : str = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
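# Quick check (counting 1 as the first Hamming number, as the seed list above
# does): the first ten terms are
#     hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
# The three pointers i, j, k only ever advance, so each candidate 2*h[i],
# 3*h[j], 5*h[k] is examined a bounded number of times -- O(n) work overall
# after the initial [1].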
if __name__ == "__main__":
lowerCAmelCase_ = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase_ = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 669 | 1 |
import math
import flax.linen as nn
import jax.numpy as jnp
def lowerCamelCase_ ( lowerCAmelCase: jnp.ndarray , lowerCAmelCase: int , lowerCAmelCase: float = 1 , lowerCAmelCase: float = 1 , lowerCAmelCase: float = 1.0E4 , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1.0 , )-> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
_snake_case : Dict = float(embedding_dim // 2 )
_snake_case : Any = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
_snake_case : List[Any] = min_timescale * jnp.exp(jnp.arange(lowerCAmelCase , dtype=jnp.floataa ) * -log_timescale_increment )
_snake_case : List[str] = jnp.expand_dims(lowerCAmelCase , 1 ) * jnp.expand_dims(lowerCAmelCase , 0 )
# scale embeddings
_snake_case : Dict = scale * emb
if flip_sin_to_cos:
_snake_case : Optional[int] = jnp.concatenate([jnp.cos(lowerCAmelCase ), jnp.sin(lowerCAmelCase )] , axis=1 )
else:
_snake_case : Any = jnp.concatenate([jnp.sin(lowerCAmelCase ), jnp.cos(lowerCAmelCase )] , axis=1 )
_snake_case : Optional[int] = jnp.reshape(lowerCAmelCase , [jnp.shape(lowerCAmelCase )[0], embedding_dim] )
return signal
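# Shape sketch: for timesteps jnp.arange(4) and embedding_dim 8, the call
#   get_sinusoidal_embeddings(jnp.arange(4), 8)
# returns a (4, 8) array -- one row per timestep, with the sin and cos halves
# concatenated along the feature axis (cos first when flip_sin_to_cos=True).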
class _lowerCAmelCase ( nn.Module ):
'''simple docstring'''
a_ : int =32
a_ : jnp.dtype =jnp.floataa
@nn.compact
def __call__( self : int , UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : List[str] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(UpperCamelCase )
_snake_case : Optional[int] = nn.silu(UpperCamelCase )
_snake_case : List[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(UpperCamelCase )
return temb
class _lowerCAmelCase ( nn.Module ):
'''simple docstring'''
a_ : int =32
a_ : bool =False
a_ : float =1
@nn.compact
def __call__( self : Optional[int] , UpperCamelCase : List[str] ):
'''simple docstring'''
return get_sinusoidal_embeddings(
UpperCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 669 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Tuple="shi-labs/oneformer_demo" )-> Any:
with open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) as f:
_snake_case : str = json.load(lowerCAmelCase )
_snake_case : List[str] = {}
_snake_case : Optional[Any] = []
_snake_case : Optional[Any] = []
for key, info in class_info.items():
_snake_case : Optional[int] = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase ) )
_snake_case : List[str] = thing_ids
_snake_case : Optional[Any] = class_names
return metadata
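# Resulting layout (illustrative, for the ADE20K panoptic file): string class
# ids map to names, plus two aggregate keys, e.g.
#   {"0": "wall", ..., "thing_ids": [...], "class_names": [...]}
# which is the `metadata` mapping OneFormerImageProcessor consumes below.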
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=30 , UpperCamelCase : int=4_00 , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Any=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Dict=10 , UpperCamelCase : Dict=False , UpperCamelCase : Dict=2_55 , UpperCamelCase : Dict="shi-labs/oneformer_demo" , UpperCamelCase : Optional[int]="ade20k_panoptic.json" , UpperCamelCase : Tuple=10 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : Union[str, Any] = batch_size
_snake_case : Tuple = num_channels
_snake_case : List[str] = min_resolution
_snake_case : List[str] = max_resolution
_snake_case : Optional[Any] = do_resize
_snake_case : Optional[Any] = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
_snake_case : Optional[int] = do_normalize
_snake_case : Any = image_mean
_snake_case : List[Any] = image_std
_snake_case : Any = class_info_file
_snake_case : List[str] = prepare_metadata(UpperCamelCase , UpperCamelCase )
_snake_case : Any = num_text
_snake_case : str = repo_path
# for the post_process_functions
_snake_case : Optional[Any] = 2
_snake_case : str = 10
_snake_case : Union[str, Any] = 10
_snake_case : List[Any] = 3
_snake_case : str = 4
_snake_case : List[Any] = num_labels
_snake_case : str = do_reduce_labels
_snake_case : List[str] = ignore_index
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=False ):
'''simple docstring'''
if not batched:
_snake_case : Any = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
_snake_case , _snake_case : Any = image.size
else:
_snake_case , _snake_case : Any = image.shape[1], image.shape[2]
if w < h:
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * h / w )
_snake_case : Any = self.size['shortest_edge']
elif w > h:
_snake_case : int = self.size['shortest_edge']
_snake_case : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
_snake_case : Dict = self.size['shortest_edge']
_snake_case : Dict = self.size['shortest_edge']
else:
_snake_case : List[Any] = []
for image in image_inputs:
_snake_case , _snake_case : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_snake_case : List[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
_snake_case : Optional[Any] = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a_ : Tuple =OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
a_ : Any =image_processing_class
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = OneFormerImageProcessorTester(self )
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Tuple=False , UpperCamelCase : str=False , UpperCamelCase : Dict="np" ):
'''simple docstring'''
_snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_snake_case : List[str] = self.image_processing_tester.num_labels
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
if with_segmentation_maps:
_snake_case : Optional[int] = num_labels
if is_instance_map:
_snake_case : Union[str, Any] = list(range(UpperCamelCase ) ) * 2
_snake_case : Tuple = dict(enumerate(UpperCamelCase ) )
_snake_case : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_snake_case : int = [Image.fromarray(UpperCamelCase ) for annotation in annotations]
_snake_case : List[Any] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=UpperCamelCase , pad_and_return_pixel_mask=UpperCamelCase , )
return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
def common(UpperCamelCase : Any=False , UpperCamelCase : int=None ):
_snake_case : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase , is_instance_map=UpperCamelCase , segmentation_type=UpperCamelCase )
_snake_case : Union[str, Any] = inputs['mask_labels']
_snake_case : Optional[int] = inputs['class_labels']
_snake_case : Optional[int] = inputs['pixel_values']
_snake_case : Optional[Any] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCamelCase )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=UpperCamelCase , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = np.zeros((20, 50) )
_snake_case : int = 1
_snake_case : int = 1
_snake_case : Optional[Any] = 1
_snake_case : List[Any] = binary_mask_to_rle(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
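    # Reading the RLE above: binary_mask_to_rle flattens the mask and emits
    # (1-indexed start, run length) pairs, so len(rle) == 4 means two runs of
    # ones; the first starts at flat pixel 21 and spans 45 pixels.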
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
        _snake_case : Any = feature_extractor.post_process_semantic_segmentation(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_snake_case : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _snake_case : Union[str, Any] = feature_extractor.post_process_semantic_segmentation(UpperCamelCase , target_sizes=UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 669 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: int , lowerCAmelCase: Tuple )-> List[Any]:
_snake_case : Optional[int] = 0
if start < end:
_snake_case : List[Any] = randint(lowerCAmelCase , lowerCAmelCase )
_snake_case : Dict = a[end]
_snake_case : Union[str, Any] = a[pivot]
_snake_case : Union[str, Any] = temp
_snake_case , _snake_case : List[Any] = _in_place_partition(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
count += _in_place_quick_sort(lowerCAmelCase , lowerCAmelCase , p - 1 )
count += _in_place_quick_sort(lowerCAmelCase , p + 1 , lowerCAmelCase )
return count
def lowerCamelCase_ ( lowerCAmelCase: Dict , lowerCAmelCase: List[str] , lowerCAmelCase: List[Any] )-> Optional[Any]:
_snake_case : Optional[int] = 0
_snake_case : Optional[Any] = randint(lowerCAmelCase , lowerCAmelCase )
_snake_case : Dict = a[end]
_snake_case : Tuple = a[pivot]
_snake_case : int = temp
_snake_case : int = start - 1
for index in range(lowerCAmelCase , lowerCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_snake_case : List[Any] = new_pivot_index + 1
_snake_case : Union[str, Any] = a[new_pivot_index]
_snake_case : Any = a[index]
_snake_case : int = temp
_snake_case : Union[str, Any] = a[new_pivot_index + 1]
_snake_case : Dict = a[end]
_snake_case : Optional[Any] = temp
return new_pivot_index + 1, count
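# Usage sketch (matching the module-level driver below): sort a list in place
# and get back the number of comparisons performed, e.g.
#   a = [9, 4, 7, 1]
#   count = _in_place_quick_sort(a, 0, len(a) - 1)
#   # a is now [1, 4, 7, 9]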
lowerCAmelCase_ = TemporaryFile()
lowerCAmelCase_ = 100 # 100 elements are to be sorted
lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation
lowerCAmelCase_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
lowerCAmelCase_ = np.load(outfile)
lowerCAmelCase_ = len(M) - 1
lowerCAmelCase_ = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 669 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase_ ( )-> Tuple:
_snake_case : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : int = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
def lowerCamelCase_ ( lowerCAmelCase: str=None )-> Any:
if subparsers is not None:
_snake_case : List[Any] = subparsers.add_parser('config' , description=lowerCAmelCase )
else:
_snake_case : Dict = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Any:
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[str] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
_snake_case : Union[str, Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase_ ( )-> Dict:
_snake_case : List[str] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
| 669 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 669 |
# Function to print upper half of diamond (pyramid)
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> List[str]:
for i in range(0 , lowerCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> List[Any]:
for i in range(lowerCAmelCase , 0 , -1 ):
for _ in range(lowerCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def lowerCamelCase_ ( lowerCAmelCase: Tuple )-> int:
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(lowerCAmelCase ) # upper half
reverse_floyd(lowerCAmelCase ) # lower half
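# For n = 3, pretty_print draws (roughly) this diamond; note that reverse_floyd
# prints each row's stars first and only then the *next* row's leading spaces:
#       *
#      * *
#     * * *
#     * * *
#      * *
#       *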
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
lowerCAmelCase_ = 1
while K:
lowerCAmelCase_ = int(input("""enter the number and , and see the magic : """))
print()
pretty_print(user_number)
lowerCAmelCase_ = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 669 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase_ = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def lowerCamelCase_ ( lowerCAmelCase: List[str] , lowerCAmelCase: List[str] )-> int:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
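# Net effect: `pytest -m unit` selects every test that was not explicitly
# marked, while tests carrying @pytest.mark.integration keep only their own
# marker and are skipped by the loop above.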
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] )-> Union[str, Any]:
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] )-> Any:
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
_snake_case : Optional[int] = tmp_path_factory.getbasetemp() / 'cache'
_snake_case : Dict = test_hf_cache_home / 'datasets'
_snake_case : List[Any] = test_hf_cache_home / 'metrics'
_snake_case : Dict = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(lowerCAmelCase ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(lowerCAmelCase ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(lowerCAmelCase ) )
_snake_case : Optional[int] = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(lowerCAmelCase ) )
_snake_case : Optional[Any] = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(lowerCAmelCase ) )
@pytest.fixture(autouse=lowerCAmelCase , scope='session' )
def lowerCamelCase_ ( )-> Union[str, Any]:
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> Optional[Any]:
# don't take tests into account when counting downloads
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , lowerCAmelCase )
@pytest.fixture
def lowerCamelCase_ ( lowerCAmelCase: int )-> Union[str, Any]:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , lowerCAmelCase )
| 669 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple ="""audio-spectrogram-transformer"""
def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
_snake_case : Tuple = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Optional[Any] = hidden_act
_snake_case : List[str] = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : Any = initializer_range
_snake_case : List[str] = layer_norm_eps
_snake_case : int = patch_size
_snake_case : List[str] = qkv_bias
_snake_case : int = frequency_stride
_snake_case : List[Any] = time_stride
_snake_case : List[Any] = max_length
_snake_case : List[str] = num_mel_bins
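# Patch-grid sketch (the striding itself lives in the modeling code, not in this
# config file): with the defaults above, 16x16 patches slide with stride 10 over
# a num_mel_bins x max_length = 128 x 1024 spectrogram, giving
#   (128 - 16) // 10 + 1 = 12 frequency positions,
#   (1024 - 16) // 10 + 1 = 101 time positions,
# i.e. 12 * 101 = 1212 patches before the class/distillation tokens are added.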
| 669 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowerCAmelCase ( unittest.TestCase , UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : int = load_tool('text-classification' )
self.tool.setup()
_snake_case : Union[str, Any] = load_tool('text-classification' , remote=UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(UpperCamelCase , 'positive' )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.remote_tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(UpperCamelCase , 'positive' )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(UpperCamelCase , 'positive' )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(UpperCamelCase , 'positive' )
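# One-off usage outside the test harness (a sketch; the first call downloads the
# default text-classification checkpoint):
#   tool = load_tool("text-classification")
#   tool.setup()
#   tool("That's quite cool", ["positive", "negative"])  # -> "positive"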
| 669 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: bool = True , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: float = math.inf , lowerCAmelCase: float = -math.inf , lowerCAmelCase: bool = False , lowerCAmelCase: float = 1_00 , lowerCAmelCase: float = 0.0_1 , lowerCAmelCase: float = 1 , )-> Any:
_snake_case : int = False
_snake_case : Any = search_prob
_snake_case : Tuple = start_temperate
_snake_case : Any = []
_snake_case : List[str] = 0
_snake_case : Optional[Any] = None
while not search_end:
_snake_case : List[Any] = current_state.score()
if best_state is None or current_score > best_state.score():
_snake_case : Dict = current_state
scores.append(lowerCAmelCase )
iterations += 1
_snake_case : Optional[int] = None
_snake_case : Union[str, Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_snake_case : Dict = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
_snake_case : int = neighbors.pop(lowerCAmelCase )
_snake_case : Union[str, Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_snake_case : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_snake_case : Union[str, Any] = picked_neighbor
else:
_snake_case : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_snake_case : int = picked_neighbor
_snake_case : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_snake_case : List[str] = True
else:
_snake_case : Union[str, Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
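# Acceptance rule in one line (the Metropolis criterion coded above): a move
# that worsens the score by `change` < 0 is still taken with probability
# e ** (change / current_temp), e.g. ~0.98 for change = -2 at T = 100, but only
# ~0.14 once the temperature has decayed to 1.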
if __name__ == "__main__":
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: List[Any] )-> List[Any]:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowerCamelCase_ ( lowerCAmelCase: Any , lowerCAmelCase: Dict )-> Dict:
return (3 * x**2) - (6 * y)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
lowerCAmelCase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 669 | 1 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowerCAmelCase_ = re.compile(r"""\s+""")
def lowerCamelCase_ ( lowerCAmelCase: List[str] )-> Dict:
return {"hash": hashlib.mda(re.sub(lowerCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] )-> Optional[int]:
_snake_case : Optional[int] = [len(lowerCAmelCase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(lowerCAmelCase ), "line_max": max(lowerCAmelCase )}
def lowerCamelCase_ ( lowerCAmelCase: List[str] )-> int:
_snake_case : str = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: Union[str, Any] )-> int:
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: str=5 )-> List[str]:
_snake_case : Dict = ['auto-generated', 'autogenerated', 'automatically generated']
_snake_case : Dict = example['content'].splitlines()
for _, line in zip(range(lowerCAmelCase ) , lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: Any=5 , lowerCAmelCase: List[Any]=0.0_5 )-> List[str]:
_snake_case : Tuple = ['unit tests', 'test file', 'configuration file']
_snake_case : Any = example['content'].splitlines()
_snake_case : List[str] = 0
_snake_case : Tuple = 0
# first test
for _, line in zip(range(lowerCAmelCase ) , lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_snake_case : List[str] = example['content'].count('\n' )
_snake_case : Tuple = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def lowerCamelCase_ ( lowerCAmelCase: int )-> Dict:
_snake_case : str = ['def ', 'class ', 'for ', 'while ']
_snake_case : List[Any] = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict=4 )-> int:
_snake_case : Tuple = example['content'].splitlines()
_snake_case : Optional[int] = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def lowerCamelCase_ ( lowerCAmelCase: int )-> List[str]:
_snake_case : int = tokenizer(example['content'] , truncation=lowerCAmelCase )['input_ids']
_snake_case : int = len(example['content'] ) / len(lowerCAmelCase )
return {"ratio": ratio}
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] )-> Dict:
_snake_case : Optional[int] = {}
results.update(get_hash(lowerCAmelCase ) )
results.update(line_stats(lowerCAmelCase ) )
results.update(alpha_stats(lowerCAmelCase ) )
results.update(char_token_ratio(lowerCAmelCase ) )
results.update(is_autogenerated(lowerCAmelCase ) )
results.update(is_config_or_test(lowerCAmelCase ) )
results.update(has_no_keywords(lowerCAmelCase ) )
results.update(has_few_assignments(lowerCAmelCase ) )
return results
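# Shape of the stats dict assembled above (values illustrative):
#   {"hash": "...", "line_mean": 23.4, "line_max": 88, "alpha_frac": 0.71,
#    "ratio": 3.2, "autogenerated": False, "config_or_test": False,
#    "has_no_keywords": False, "has_few_assignments": True}
# `filter` below keeps an example only if its hash is still in `uniques` (exact
# dedup) and every heuristic passes, ahead of the optional MinHash near-dedup.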
def lowerCamelCase_ ( lowerCAmelCase: Dict , lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] )-> Dict:
if not check_uniques(lowerCAmelCase , lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] )-> Any:
with open(lowerCAmelCase , 'rb' ) as f_in:
with gzip.open(str(lowerCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
os.unlink(lowerCAmelCase )
# Settings
lowerCAmelCase_ = HfArgumentParser(PreprocessingArguments)
lowerCAmelCase_ = parser.parse_args()
if args.num_workers is None:
lowerCAmelCase_ = multiprocessing.cpu_count()
lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowerCAmelCase_ = time.time()
lowerCAmelCase_ = load_dataset(args.dataset_name, split="""train""")
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
lowerCAmelCase_ = time.time()
lowerCAmelCase_ = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
lowerCAmelCase_ = set(ds.unique("""hash"""))
lowerCAmelCase_ = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
lowerCAmelCase_ = time.time()
lowerCAmelCase_ = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowerCAmelCase_ = time.time()
lowerCAmelCase_ , lowerCAmelCase_ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(F"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
lowerCAmelCase_ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / """duplicate_clusters.json""", """w""") as f:
json.dump(duplicate_clusters, f)
lowerCAmelCase_ = output_dir / """data"""
data_dir.mkdir(exist_ok=True)
lowerCAmelCase_ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowerCAmelCase_ = str(data_dir / F"""file-{file_number+1:012}.json""")
lowerCAmelCase_ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
| 669 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : torch.FloatTensor
class _lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : str , UpperCamelCase : int = 32 , UpperCamelCase : int = 64 , UpperCamelCase : int = 20 , UpperCamelCase : int = 7_68 , UpperCamelCase : Optional[int]=77 , UpperCamelCase : int=4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "silu" , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "linear" , UpperCamelCase : Optional[str] = "prd" , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
_snake_case : str = num_attention_heads
_snake_case : Optional[int] = attention_head_dim
_snake_case : Any = num_attention_heads * attention_head_dim
_snake_case : List[Any] = additional_embeddings
_snake_case : List[str] = time_embed_dim or inner_dim
_snake_case : int = embedding_proj_dim or embedding_dim
_snake_case : List[Any] = clip_embed_dim or embedding_dim
_snake_case : Optional[Any] = Timesteps(UpperCamelCase , UpperCamelCase , 0 )
_snake_case : List[Any] = TimestepEmbedding(UpperCamelCase , UpperCamelCase , out_dim=UpperCamelCase , act_fn=UpperCamelCase )
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
if embedding_proj_norm_type is None:
_snake_case : str = None
elif embedding_proj_norm_type == "layer":
_snake_case : List[Any] = nn.LayerNorm(UpperCamelCase )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_snake_case : str = nn.Linear(UpperCamelCase , UpperCamelCase )
if encoder_hid_proj_type is None:
_snake_case : Any = None
elif encoder_hid_proj_type == "linear":
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , UpperCamelCase )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase ) )
if added_emb_type == "prd":
_snake_case : str = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase ) )
elif added_emb_type is None:
_snake_case : Dict = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_snake_case : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase , UpperCamelCase , UpperCamelCase , dropout=UpperCamelCase , activation_fn='gelu' , attention_bias=UpperCamelCase , )
for d in range(UpperCamelCase )
] )
if norm_in_type == "layer":
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase )
elif norm_in_type is None:
_snake_case : Optional[Any] = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
_snake_case : Optional[Any] = nn.LayerNorm(UpperCamelCase )
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_snake_case : Optional[Any] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , UpperCamelCase , persistent=UpperCamelCase )
_snake_case : str = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
_snake_case : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = {}
def fn_recursive_add_processors(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Dict[str, AttentionProcessor] ):
if hasattr(UpperCamelCase , 'set_processor' ):
_snake_case : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return processors
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase , UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase : str , UpperCamelCase : torch.nn.Module , UpperCamelCase : Union[str, Any] ):
if hasattr(UpperCamelCase , 'set_processor' ):
if not isinstance(UpperCamelCase , UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase , UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Union[torch.Tensor, float, int] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.BoolTensor] = None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_snake_case : Dict = hidden_states.shape[0]
_snake_case : str = timestep
if not torch.is_tensor(UpperCamelCase ):
_snake_case : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase ) and len(timesteps.shape ) == 0:
_snake_case : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_snake_case : Optional[int] = timesteps * torch.ones(UpperCamelCase , dtype=timesteps.dtype , device=timesteps.device )
_snake_case : Union[str, Any] = self.time_proj(UpperCamelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_snake_case : Tuple = timesteps_projected.to(dtype=self.dtype )
_snake_case : List[Any] = self.time_embedding(UpperCamelCase )
if self.embedding_proj_norm is not None:
_snake_case : Optional[Any] = self.embedding_proj_norm(UpperCamelCase )
_snake_case : Union[str, Any] = self.embedding_proj(UpperCamelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_snake_case : Dict = self.encoder_hidden_states_proj(UpperCamelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_snake_case : str = self.proj_in(UpperCamelCase )
_snake_case : int = self.positional_embedding.to(hidden_states.dtype )
_snake_case : Optional[int] = []
_snake_case : List[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_snake_case : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_snake_case : str = hidden_states[:, None, :]
_snake_case : str = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_snake_case : int = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase , -1 , -1 )
additional_embeds.append(UpperCamelCase )
_snake_case : Optional[int] = torch.cat(
UpperCamelCase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_snake_case : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_snake_case : Optional[Any] = F.pad(
UpperCamelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_snake_case : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_snake_case : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_snake_case : Tuple = F.pad(UpperCamelCase , (0, self.additional_embeddings) , value=0.0 )
_snake_case : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_snake_case : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_snake_case : Tuple = self.norm_in(UpperCamelCase )
for block in self.transformer_blocks:
_snake_case : Any = block(UpperCamelCase , attention_mask=UpperCamelCase )
_snake_case : Dict = self.norm_out(UpperCamelCase )
if self.prd_embedding is not None:
_snake_case : str = hidden_states[:, -1]
else:
_snake_case : Any = hidden_states[:, additional_embeddings_len:]
_snake_case : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
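# --- Hedged illustration (assumes torch; names are illustrative) ---
# The prior above precomputes an additive causal mask by filling a square
# matrix with -10000.0 and keeping only the strict upper triangle, so that
# attention logits for future positions are pushed to effectively -inf:
import torch

seq_len = 4
causal = torch.full((seq_len, seq_len), -10000.0).triu_(1)[None, ...]
# causal[0] is 0.0 on and below the diagonal and -10000.0 strictly above it,
# mirroring the `causal_attention_mask` buffer registered in __init__.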
| 669 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[str] ="""deformable_detr"""
a_ : int ={
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Optional[int] , UpperCamelCase : Any=True , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : List[Any]=3 , UpperCamelCase : Union[str, Any]=3_00 , UpperCamelCase : str=10_24 , UpperCamelCase : int=6 , UpperCamelCase : Optional[int]=10_24 , UpperCamelCase : List[str]=8 , UpperCamelCase : List[Any]=6 , UpperCamelCase : Union[str, Any]=10_24 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : str=True , UpperCamelCase : str="relu" , UpperCamelCase : Optional[Any]=2_56 , UpperCamelCase : str=0.1 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.0 , UpperCamelCase : Union[str, Any]=0.02 , UpperCamelCase : Optional[int]=1.0 , UpperCamelCase : str=True , UpperCamelCase : List[str]=False , UpperCamelCase : Any="sine" , UpperCamelCase : Tuple="resnet50" , UpperCamelCase : List[str]=True , UpperCamelCase : List[Any]=False , UpperCamelCase : Tuple=4 , UpperCamelCase : int=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : List[Any]=False , UpperCamelCase : Dict=3_00 , UpperCamelCase : Optional[Any]=False , UpperCamelCase : Union[str, Any]=1 , UpperCamelCase : str=5 , UpperCamelCase : str=2 , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : str=1 , UpperCamelCase : str=5 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : int=0.1 , UpperCamelCase : Tuple=0.25 , UpperCamelCase : Any=False , **UpperCamelCase : str , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_snake_case : Optional[int] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(UpperCamelCase , UpperCamelCase ):
_snake_case : Dict = backbone_config.get('model_type' )
_snake_case : Tuple = CONFIG_MAPPING[backbone_model_type]
_snake_case : int = config_class.from_dict(UpperCamelCase )
_snake_case : Union[str, Any] = use_timm_backbone
_snake_case : Dict = backbone_config
_snake_case : Optional[Any] = num_channels
_snake_case : Any = num_queries
_snake_case : List[Any] = max_position_embeddings
_snake_case : Dict = d_model
_snake_case : Optional[int] = encoder_ffn_dim
_snake_case : Dict = encoder_layers
_snake_case : Any = encoder_attention_heads
_snake_case : List[Any] = decoder_ffn_dim
_snake_case : Optional[int] = decoder_layers
_snake_case : Any = decoder_attention_heads
_snake_case : Tuple = dropout
_snake_case : List[Any] = attention_dropout
_snake_case : Optional[Any] = activation_dropout
_snake_case : Tuple = activation_function
_snake_case : Any = init_std
_snake_case : List[str] = init_xavier_std
_snake_case : Dict = encoder_layerdrop
_snake_case : List[str] = auxiliary_loss
_snake_case : Union[str, Any] = position_embedding_type
_snake_case : Optional[Any] = backbone
_snake_case : List[str] = use_pretrained_backbone
_snake_case : Dict = dilation
# deformable attributes
_snake_case : Optional[int] = num_feature_levels
_snake_case : Optional[Any] = encoder_n_points
_snake_case : Union[str, Any] = decoder_n_points
_snake_case : List[str] = two_stage
_snake_case : Dict = two_stage_num_proposals
_snake_case : Optional[Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
_snake_case : str = class_cost
_snake_case : List[str] = bbox_cost
_snake_case : str = giou_cost
# Loss coefficients
_snake_case : str = mask_loss_coefficient
_snake_case : Tuple = dice_loss_coefficient
_snake_case : str = bbox_loss_coefficient
_snake_case : str = giou_loss_coefficient
_snake_case : List[Any] = eos_coefficient
_snake_case : List[str] = focal_alpha
_snake_case : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self.d_model
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_snake_case : str = self.backbone_config.to_dict()
_snake_case : List[str] = self.__class__.model_type
return output
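# --- Hedged usage sketch (assumes `transformers` with torch installed; kept
# as comments since it instantiates a full, randomly initialized model) ---
# from transformers import DeformableDetrConfig, DeformableDetrModel
# config = DeformableDetrConfig(num_queries=100, two_stage=False)
# model = DeformableDetrModel(config)   # randomly initialized weights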
| 669 |
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Union[str, Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCAmelCase )
if number < 1:
_snake_case : int = F"""Input value of [number={number}] must be > 0"""
raise ValueError(lowerCAmelCase )
_snake_case : int = 1
for i in range(1 , lowerCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
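# --- Hedged cross-check (stdlib only) ---
# The recurrence above (current *= 4*i - 2; current //= i + 1) walks the
# Catalan numbers, returning C_(n-1) for input n: 1, 1, 2, 5, 14, ... for
# n = 1..5. A direct check against the closed form C_k = C(2k, k) / (k + 1):
from math import comb

def catalan(k: int) -> int:
    return comb(2 * k, k) // (k + 1)

# catalan(4) == 14, matching the loop above for number == 5.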
| 669 | 1 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Dict =["""audio_values""", """audio_mask"""]
def __init__( self : List[str] , UpperCamelCase : List[str]=20_48 , UpperCamelCase : Union[str, Any]=1 , UpperCamelCase : Optional[Any]=[16, 16] , UpperCamelCase : Dict=1_28 , UpperCamelCase : str=4_41_00 , UpperCamelCase : List[str]=86 , UpperCamelCase : int=20_48 , UpperCamelCase : Tuple=0.0 , **UpperCamelCase : Optional[Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase , sampling_rate=UpperCamelCase , padding_value=UpperCamelCase , **UpperCamelCase , )
_snake_case : List[str] = spectrogram_length
_snake_case : Optional[int] = num_channels
_snake_case : List[Any] = patch_size
_snake_case : List[Any] = feature_size // self.patch_size[1]
_snake_case : Optional[Any] = n_fft
_snake_case : Dict = sampling_rate // hop_length_to_sampling_rate
_snake_case : Optional[int] = sampling_rate
_snake_case : Any = padding_value
_snake_case : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCamelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=UpperCamelCase , norm='slaney' , mel_scale='slaney' , ).T
def UpperCamelCase_ ( self : int , UpperCamelCase : np.array ):
'''simple docstring'''
_snake_case : Any = spectrogram(
UpperCamelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
_snake_case : List[str] = log_spec[:, :-1]
_snake_case : Optional[Any] = log_spec - 20.0
_snake_case : Union[str, Any] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : List[Any] , UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[bool] = True , UpperCamelCase : Optional[int] = None , UpperCamelCase : bool = False , UpperCamelCase : bool = False , **UpperCamelCase : Dict , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_snake_case : str = isinstance(UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
_snake_case : Optional[int] = is_batched_numpy or (
isinstance(UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            _snake_case : List[str] = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCamelCase , np.ndarray ):
            _snake_case : int = np.asarray(UpperCamelCase , dtype=np.float32 )
        elif isinstance(UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            _snake_case : List[Any] = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
_snake_case : Dict = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_snake_case : List[Any] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , UpperCamelCase ):
_snake_case : Union[str, Any] = [np.asarray(UpperCamelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_snake_case : Union[str, Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_snake_case : Dict = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
            _snake_case : Optional[Any] = np.array(UpperCamelCase ).astype(np.float32 )
# convert into correct format for padding
_snake_case : List[Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        _snake_case : List[Any] = np.ones([len(UpperCamelCase ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
_snake_case : Optional[Any] = padded_audio_features * self.padding_value
for i in range(len(UpperCamelCase ) ):
_snake_case : str = audio_features[i]
_snake_case : List[Any] = feature
# return as BatchFeature
if return_attention_mask:
_snake_case : Union[str, Any] = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
_snake_case : int = {'audio_values': padded_audio_features}
_snake_case : List[str] = BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
return encoded_inputs
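# --- Hedged illustration (assumes numpy) ---
# The normalization inside _np_extract_fbank_features above shifts dB values
# by -20, scales by 1/40, clips to [-2, 0], then adds 1, landing in [-1, 1]:
import numpy as np

db = np.array([-80.0, -20.0, 0.0, 20.0])
normed = np.clip((db - 20.0) / 40.0, -2.0, 0.0) + 1.0
# normed -> array([-1. ,  0. ,  0.5,  1. ])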
| 669 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
lowerCAmelCase_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : List[Any] =VOCAB_FILES_NAMES
a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : Any =LxmertTokenizer
def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : Optional[int] = do_lower_case
_snake_case : Dict = strip_accents
_snake_case : Optional[int] = tokenize_chinese_chars
_snake_case : Optional[Any] = normalizer_class(**UpperCamelCase )
_snake_case : int = do_lower_case
def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ):
'''simple docstring'''
_snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Tuple = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
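# --- Hedged usage sketch (assumes `transformers`; kept as comments since it
# fetches a checkpoint from the Hub) ---
# from transformers import LxmertTokenizerFast
# tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
# enc = tok("a photo of a cat", "what animal is shown?")
# enc["token_type_ids"]  # 0s for the first segment, 1s for the second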
| 669 | 1 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 |
from __future__ import annotations
from random import random
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : int | None = None ):
'''simple docstring'''
_snake_case : str = value
_snake_case : List[Any] = random()
_snake_case : Node | None = None
_snake_case : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_snake_case , _snake_case : Optional[Any] = split(root.left , lowerCAmelCase )
return left, root
else:
_snake_case , _snake_case : List[str] = split(root.right , lowerCAmelCase )
return root, right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: Node | None )-> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_snake_case : str = merge(left.right , lowerCAmelCase )
return left
else:
_snake_case : Union[str, Any] = merge(lowerCAmelCase , right.left )
return right
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case : Tuple = Node(lowerCAmelCase )
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , lowerCAmelCase )
return merge(merge(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: int )-> Node | None:
_snake_case , _snake_case : Optional[int] = split(lowerCAmelCase , value - 1 )
_snake_case , _snake_case : List[str] = split(lowerCAmelCase , lowerCAmelCase )
return merge(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase_ ( lowerCAmelCase: Node | None )-> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def lowerCamelCase_ ( lowerCAmelCase: Node | None , lowerCAmelCase: str )-> Node | None:
for arg in args.split():
if arg[0] == "+":
_snake_case : List[str] = insert(lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
_snake_case : Any = erase(lowerCAmelCase , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def lowerCamelCase_ ( )-> None:
_snake_case : Tuple = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
_snake_case : List[Any] = input()
while args != "q":
_snake_case : int = interact_treap(lowerCAmelCase , lowerCAmelCase )
print(lowerCAmelCase )
_snake_case : Tuple = input()
    print('good bye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
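# --- Hedged usage sketch (assumes the helpers above keep their upstream
# names insert, erase and inorder; kept as comments) ---
# root = None
# for v in (5, 3, 8, 1):
#     root = insert(root, v)
# inorder(root)          # prints: 1,3,5,8,
# root = erase(root, 3)
# inorder(root)          # prints: 1,5,8,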
| 669 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
lowerCAmelCase_ = """bart"""
lowerCAmelCase_ = True
@st.cache(allow_output_mutation=lowerCAmelCase )
def lowerCamelCase_ ( )-> int:
if LOAD_DENSE_INDEX:
_snake_case : Any = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
_snake_case : Union[str, Any] = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
_snake_case : List[Any] = qar_model.eval()
else:
_snake_case , _snake_case : Any = (None, None)
if MODEL_TYPE == "bart":
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
        _snake_case : Optional[Any] = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
_snake_case : List[str] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
_snake_case : Optional[Any] = sas_model.eval()
else:
_snake_case , _snake_case : Optional[int] = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=lowerCAmelCase )
def lowerCamelCase_ ( )-> Dict:
if LOAD_DENSE_INDEX:
_snake_case : Tuple = faiss.StandardGpuResources()
_snake_case : List[str] = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
_snake_case : Any = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 1_28) , )
_snake_case : int = faiss.IndexFlatIP(1_28 )
_snake_case : Optional[int] = faiss.index_cpu_to_gpu(lowerCAmelCase , 1 , lowerCAmelCase )
wikiaab_gpu_index_flat.add(lowerCAmelCase ) # TODO fix for larger GPU
else:
_snake_case , _snake_case : Any = (None, None)
_snake_case : Dict = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=lowerCAmelCase )
def lowerCamelCase_ ( )-> Tuple:
_snake_case : Optional[Any] = datasets.load_dataset('eli5' , name='LFQA_reddit' )
_snake_case : Dict = elia['train_eli5']
_snake_case : Tuple = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 1_28) )
_snake_case : Any = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(lowerCAmelCase )
return (elia_train, eli5_train_q_index)
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = load_indexes()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = load_models()
lowerCAmelCase_ , lowerCAmelCase_ = load_train_data()
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[Any]=10 )-> Tuple:
_snake_case : Tuple = embed_questions_for_retrieval([question] , lowerCAmelCase , lowerCAmelCase )
_snake_case , _snake_case : int = eli5_train_q_index.search(lowerCAmelCase , lowerCAmelCase )
_snake_case : Dict = [elia_train[int(lowerCAmelCase )] for i in I[0]]
return nn_examples
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: Any="wiki40b" , lowerCAmelCase: List[Any]="dense" , lowerCAmelCase: str=10 )-> Optional[Any]:
if source == "none":
_snake_case , _snake_case : List[Any] = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_snake_case , _snake_case : int = query_qa_dense_index(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
_snake_case , _snake_case : Tuple = query_es_index(
lowerCAmelCase , lowerCAmelCase , index_name='english_wiki40b_snippets_100w' , n_results=lowerCAmelCase , )
_snake_case : Any = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
_snake_case : str = 'question: {} context: {}'.format(lowerCAmelCase , lowerCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda lowerCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowerCAmelCase : None),
} )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Any , lowerCAmelCase: int , lowerCAmelCase: Tuple=64 , lowerCAmelCase: Tuple=2_56 , lowerCAmelCase: List[str]=False , lowerCAmelCase: str=2 , lowerCAmelCase: Optional[int]=0.9_5 , lowerCAmelCase: Optional[Any]=0.8 )-> Tuple:
with torch.no_grad():
_snake_case : Optional[Any] = qa_sas_generate(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_answers=1 , num_beams=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase , do_sample=lowerCAmelCase , temp=lowerCAmelCase , top_p=lowerCAmelCase , top_k=lowerCAmelCase , max_input_length=10_24 , device='cuda:0' , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
lowerCAmelCase_ = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
lowerCAmelCase_ = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
lowerCAmelCase_ = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
lowerCAmelCase_ = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
lowerCAmelCase_ = st.sidebar.checkbox("""Demo options""")
if demo_options:
lowerCAmelCase_ = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
lowerCAmelCase_ = action_list.index(action_st)
lowerCAmelCase_ = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
lowerCAmelCase_ = show_type == """Show full text of passages"""
else:
lowerCAmelCase_ = 3
lowerCAmelCase_ = True
lowerCAmelCase_ = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
lowerCAmelCase_ = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
 trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
 The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
lowerCAmelCase_ = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
lowerCAmelCase_ = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
lowerCAmelCase_ = """wiki40b"""
lowerCAmelCase_ = """dense"""
lowerCAmelCase_ = """beam"""
lowerCAmelCase_ = 2
lowerCAmelCase_ = 64
lowerCAmelCase_ = 256
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = st.sidebar.checkbox("""Generation options""")
if generate_options:
lowerCAmelCase_ = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
lowerCAmelCase_ = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
lowerCAmelCase_ = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
lowerCAmelCase_ = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
lowerCAmelCase_ = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
lowerCAmelCase_ = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
lowerCAmelCase_ = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
lowerCAmelCase_ = None
# start main text
lowerCAmelCase_ = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
lowerCAmelCase_ = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
lowerCAmelCase_ = st.text_input("""Enter your question here:""", """""")
else:
lowerCAmelCase_ = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
lowerCAmelCase_ , lowerCAmelCase_ = make_support(question, source=wiki_source, method="""dense""", n_results=10)
lowerCAmelCase_ , lowerCAmelCase_ = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
lowerCAmelCase_ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
lowerCAmelCase_ = support_list[:10]
lowerCAmelCase_ = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
lowerCAmelCase_ , lowerCAmelCase_ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
lowerCAmelCase_ , lowerCAmelCase_ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
lowerCAmelCase_ = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
lowerCAmelCase_ = res[1].strip()
if sec_titles == "":
lowerCAmelCase_ = """[{}]({})""".format(res[0], wiki_url)
else:
lowerCAmelCase_ = sec_titles.split(""" & """)
lowerCAmelCase_ = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
lowerCAmelCase_ = find_nearest_training(question)
lowerCAmelCase_ = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
lowerCAmelCase_ = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
lowerCAmelCase_ = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
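# --- Hedged usage note ---
# This module is a Streamlit app; assuming the index, memmap and model files
# referenced above exist locally, it would be launched with:
#   streamlit run <this_file>.py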
| 669 |
from functools import reduce
lowerCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def lowerCamelCase_ ( lowerCAmelCase: str = N )-> int:
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCAmelCase , lowerCAmelCase : str(int(lowerCAmelCase ) * int(lowerCAmelCase ) ) , n[i : i + 13] ) )
for i in range(len(lowerCAmelCase ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 669 | 1 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def lowerCamelCase_ ( lowerCAmelCase: str = "" )-> dict[str, float]:
_snake_case : Optional[Any] = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
_snake_case : List[str] = BeautifulSoup(requests.get(lowerCAmelCase ).text , 'html.parser' )
_snake_case : List[Any] = soup.find_all('td' , attrs='titleColumn' )
_snake_case : List[Any] = soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowerCAmelCase , lowerCAmelCase )
}
def lowerCamelCase_ ( lowerCAmelCase: str = "IMDb_Top_250_Movies.csv" )-> None:
_snake_case : Dict = get_imdb_top_aaa_movies()
with open(lowerCAmelCase , 'w' , newline='' ) as out_file:
_snake_case : Dict = csv.writer(lowerCAmelCase )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
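# --- Hedged companion sketch (stdlib only; the path mirrors the default
# filename used by the writer above) ---
# Reading the CSV written above back into a dict of title -> rating:
import csv

def read_movies(path: str = "IMDb_Top_250_Movies.csv") -> dict[str, float]:
    with open(path, newline="") as in_file:
        reader = csv.reader(in_file)
        next(reader)  # skip the ['Movie title', 'IMDb rating'] header row
        return {title: float(rating) for title, rating in reader}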
| 669 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase_ ( )-> Any:
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
_snake_case : Optional[Any] = Dataset.from_dict(lowerCAmelCase )
return dataset
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = get_dataset()
_snake_case : Tuple = make_duplicate_clusters(UpperCamelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = get_dataset()
_snake_case , _snake_case : str = deduplicate_dataset(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 2 )
print(UpperCamelCase )
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCamelCase )
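# --- Hedged sketch of the underlying measure ---
# MinHash above approximates Jaccard similarity over token sets; an exact,
# stdlib-only reference for small inputs:
def jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb) if sa | sb else 0.0

# jaccard("a " * 20, "a " * 30) == 1.0, so at the 0.85 threshold used in the
# tests the first two repositories fall into one duplicate cluster.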
| 669 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 700 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =["""image_processor""", """tokenizer"""]
a_ : Optional[int] ="""CLIPImageProcessor"""
a_ : Optional[Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
_snake_case : Optional[Any] = kwargs.pop('feature_extractor' )
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Optional[int] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
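# --- Hedged usage sketch (kept as comments; the checkpoint id below is
# purely illustrative, not a confirmed repository) ---
# from PIL import Image
# processor = _lowerCAmelCase.from_pretrained("some-org/some-clip-checkpoint")
# inputs = processor(text=["a cat"], images=Image.new("RGB", (224, 224)),
#                    return_tensors="pt")
# # `inputs` carries the tokenizer fields plus `pixel_values` from the image side.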
| 669 | 0 |
'''simple docstring'''
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] = 1_00 )-> List[Any]:
_snake_case : Tuple = 0
_snake_case : Any = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 701 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =200
a_ : List[str] ={"""Content-Length""": """100"""}
a_ : Tuple ={}
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Any ):
'''simple docstring'''
return [bytes(UpperCamelCase , 'utf-8' )]
def lowerCamelCase_ ( *lowerCAmelCase: Tuple , **lowerCAmelCase: Tuple )-> str:
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict )-> Optional[Any]:
import requests
monkeypatch.setattr(lowerCAmelCase , 'request' , lowerCAmelCase )
_snake_case : List[str] = URL
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[int] = url
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Any = [url]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': url}
_snake_case : int = 'dummy'
_snake_case : Optional[Any] = 'downloads'
_snake_case : Union[str, Any] = tmp_path
_snake_case : Dict = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase , lowerCAmelCase ) , use_etag=lowerCAmelCase , )
_snake_case : str = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Optional[int] = dl_manager.download(lowerCAmelCase )
_snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = [downloaded_paths]
_snake_case : List[str] = [urls]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in downloaded_paths.keys()
_snake_case : Any = downloaded_paths.values()
_snake_case : List[str] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase , lowerCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case : str = Path(lowerCAmelCase )
_snake_case : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case : List[str] = downloaded_path.read_text()
assert content == CONTENT
_snake_case : Any = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_snake_case : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] , lowerCAmelCase: Any )-> str:
_snake_case : str = str(lowerCAmelCase )
if issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : str = filename
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[Any] = [filename]
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
_snake_case : Optional[Any] = {'train': filename}
_snake_case : Any = 'dummy'
_snake_case : Union[str, Any] = xz_file.parent
_snake_case : int = 'extracted'
_snake_case : Union[str, Any] = DownloadConfig(
cache_dir=lowerCAmelCase , use_etag=lowerCAmelCase , )
_snake_case : List[str] = DownloadManager(dataset_name=lowerCAmelCase , download_config=lowerCAmelCase )
_snake_case : Dict = dl_manager.extract(lowerCAmelCase )
_snake_case : Optional[int] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_snake_case : List[str] = [extracted_paths]
_snake_case : int = [paths]
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : str = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase , lowerCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : List[str] = Path(lowerCAmelCase )
_snake_case : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase , etag=lowerCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : Optional[int] = extracted_path.read_text()
_snake_case : int = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowerCamelCase_ ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] )-> Dict:
assert path.endswith('.jsonl' )
for num_items, line in enumerate(lowerCAmelCase , start=1 ):
_snake_case : Dict = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] )-> Dict:
_snake_case : List[str] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: int )-> str:
_snake_case : List[Any] = request.getfixturevalue(lowerCAmelCase )
_snake_case : Optional[int] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase ) , start=1 ):
_test_jsonl(lowerCAmelCase , lowerCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase_ ( lowerCAmelCase: Any )-> int:
_snake_case : Tuple = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase ) , start=1 ):
assert os.path.basename(lowerCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 702 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : int ="""roberta"""
def __init__( self : int , UpperCamelCase : Tuple=5_02_65 , UpperCamelCase : Any=7_68 , UpperCamelCase : List[Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : Dict=30_72 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[Any]=5_12 , UpperCamelCase : List[str]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : str=1 , UpperCamelCase : int=0 , UpperCamelCase : Any=2 , UpperCamelCase : int="absolute" , UpperCamelCase : int=True , UpperCamelCase : List[Any]=None , **UpperCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
_snake_case : Any = vocab_size
_snake_case : List[str] = hidden_size
_snake_case : List[str] = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : List[str] = hidden_act
_snake_case : Union[str, Any] = intermediate_size
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : Dict = max_position_embeddings
_snake_case : Optional[int] = type_vocab_size
_snake_case : Tuple = initializer_range
_snake_case : int = layer_norm_eps
_snake_case : Dict = position_embedding_type
_snake_case : Union[str, Any] = use_cache
_snake_case : str = classifier_dropout
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
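# Illustrative expectation (an addition, not part of the original file): for the
# default (non multiple-choice) task, the `inputs` property above resolves to
# batch/sequence dynamic axes for both tensors:
#   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])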
| 669 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase_ = False
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : str = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_snake_case : Optional[Any] = 'A painting of a squirrel eating a burger '
_snake_case : int = torch.manual_seed(0 )
_snake_case : Union[str, Any] = pipe(
prompt=A_ , generator=A_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A_ )
_snake_case : List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained(A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_snake_case : str = generator.manual_seed(0 )
_snake_case : Optional[int] = pipe(
prompt=A_ , generator=A_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_snake_case : str = 'A painting of a squirrel eating a burger '
_snake_case : Optional[int] = torch.manual_seed(0 )
_snake_case : Any = pipe(
prompt=A_ , generator=A_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
_snake_case : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_snake_case : Optional[int] = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 703 |
from random import randint, random
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: int , lowerCAmelCase: bool = False , lowerCAmelCase: bool = False , lowerCAmelCase: int = 5 , )-> list:
_snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any car
_snake_case : List[str] = 0
_snake_case : List[str] = max(lowerCAmelCase , 0 )
while i < number_of_cells:
_snake_case : Optional[Any] = (
randint(0 , lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int )-> int:
_snake_case : Dict = 0
_snake_case : Optional[Any] = highway_now[car_index + 1 :]
for cell in range(len(lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
    # If the car is near the end of the highway, wrap around and keep counting
return distance + get_distance(lowerCAmelCase , -1 )
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : List[Any] = len(lowerCAmelCase )
    # Before calculations, the highway is empty
_snake_case : List[Any] = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_snake_case : int = min(highway_now[car_index] + 1 , lowerCAmelCase )
            # Number of empty cells before the next car
_snake_case : Tuple = get_distance(lowerCAmelCase , lowerCAmelCase ) - 1
# We can't have the car causing an accident
_snake_case : Union[str, Any] = min(next_highway[car_index] , lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
_snake_case : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowerCamelCase_ ( lowerCAmelCase: list , lowerCAmelCase: int , lowerCAmelCase: float , lowerCAmelCase: int )-> list:
_snake_case : Dict = len(highway[0] )
for i in range(lowerCAmelCase ):
_snake_case : Any = update(highway[i] , lowerCAmelCase , lowerCAmelCase )
_snake_case : Tuple = [-1] * number_of_cells
for car_index in range(lowerCAmelCase ):
_snake_case : Union[str, Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_snake_case : Union[str, Any] = (car_index + speed) % number_of_cells
# Commit the change of position
_snake_case : Tuple = speed
highway.append(lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
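# Illustrative driver (an addition, not part of the original module). It assumes
# the three helpers above keep their original names `construct_highway`,
# `update`, and `simulate`; in this copy they were all renamed to
# `lowerCamelCase_`, so later definitions shadow earlier ones and the sketch
# below is not runnable against this file as written.
#
#   highway = construct_highway(number_of_cells=45, frequency=5, initial_speed=2)
#   history = simulate(highway, number_of_update=10, probability=0.1, max_speed=5)
#   # history[t][cell] is the speed of the car in `cell` at step t (-1 = empty)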
| 669 | 0 |
from collections import deque
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
_snake_case : Dict = process_name # process name
_snake_case : Dict = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
_snake_case : Optional[Any] = arrival_time
_snake_case : int = burst_time # remaining burst time
_snake_case : Optional[Any] = 0 # total time of the process wait in ready queue
_snake_case : Dict = 0 # time from arrival time to completion time
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : deque[Process] , UpperCamelCase : int , ):
'''simple docstring'''
_snake_case : List[str] = number_of_queues
# time slice of queues that round robin algorithm applied
_snake_case : Optional[Any] = time_slices
# unfinished process is in this ready_queue
_snake_case : List[str] = queue
# current time
_snake_case : List[Any] = current_time
# finished process is in this sequence queue
_snake_case : deque[Process] = deque()
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def UpperCamelCase_ ( self : Any , UpperCamelCase : list[Process] ):
'''simple docstring'''
_snake_case : Optional[int] = []
for i in range(len(UpperCamelCase_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : list[Process] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
for i in range(len(UpperCamelCase_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : list[Process] ):
'''simple docstring'''
_snake_case : Optional[Any] = []
for i in range(len(UpperCamelCase_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : deque[Process] ):
'''simple docstring'''
return [q.burst_time for q in queue]
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : Process ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : deque[Process] ):
'''simple docstring'''
_snake_case : deque[Process] = deque() # sequence deque of finished process
while len(UpperCamelCase_ ) != 0:
_snake_case : Dict = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(UpperCamelCase_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
_snake_case : Any = 0
# set the process's turnaround time because it is finished
_snake_case : Optional[Any] = self.current_time - cp.arrival_time
# set the completion time
_snake_case : Union[str, Any] = self.current_time
# add the process to queue that has finished queue
finished.append(UpperCamelCase_ )
self.finish_queue.extend(UpperCamelCase_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : deque[Process] , UpperCamelCase : int ):
'''simple docstring'''
_snake_case : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(UpperCamelCase_ ) ):
_snake_case : Any = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(UpperCamelCase_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
_snake_case : List[str] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(UpperCamelCase_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
_snake_case : Dict = 0
# set the finish time
_snake_case : Union[str, Any] = self.current_time
# update the process' turnaround time because it is finished
_snake_case : str = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(UpperCamelCase_ )
self.finish_queue.extend(UpperCamelCase_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
_snake_case : List[Any] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_ = Process("""P1""", 0, 53)
lowerCAmelCase_ = Process("""P2""", 0, 17)
lowerCAmelCase_ = Process("""P3""", 0, 68)
lowerCAmelCase_ = Process("""P4""", 0, 24)
lowerCAmelCase_ = 3
lowerCAmelCase_ = [17, 25]
lowerCAmelCase_ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_ = Process("""P1""", 0, 53)
lowerCAmelCase_ = Process("""P2""", 0, 17)
lowerCAmelCase_ = Process("""P3""", 0, 68)
lowerCAmelCase_ = Process("""P4""", 0, 24)
lowerCAmelCase_ = 3
lowerCAmelCase_ = [17, 25]
lowerCAmelCase_ = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_ = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_ = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase_ = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
lowerCAmelCase_ = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Union[str, Any] =VOCAB_FILES_NAMES
a_ : List[str] =PRETRAINED_VOCAB_FILES_MAP
a_ : str =PRETRAINED_INIT_CONFIGURATION
a_ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ : List[Any] =RealmTokenizer
def __init__( self : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]="[UNK]" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Optional[Any]="[PAD]" , UpperCamelCase : Optional[int]="[CLS]" , UpperCamelCase : Optional[Any]="[MASK]" , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , )
_snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars
):
_snake_case : int = getattr(UpperCamelCase , normalizer_state.pop('type' ) )
_snake_case : List[str] = do_lower_case
_snake_case : List[Any] = strip_accents
_snake_case : Dict = tokenize_chinese_chars
_snake_case : Any = normalizer_class(**UpperCamelCase )
_snake_case : Optional[int] = do_lower_case
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = PaddingStrategy.MAX_LENGTH
_snake_case : Any = text
_snake_case : List[str] = kwargs.pop('text_pair' , UpperCamelCase )
_snake_case : int = kwargs.pop('return_tensors' , UpperCamelCase )
_snake_case : Optional[int] = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
_snake_case : List[Any] = batch_text_pair[idx]
else:
_snake_case : Optional[Any] = None
_snake_case : Optional[int] = super().__call__(UpperCamelCase , UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
_snake_case : str = encoded_candidates.get('input_ids' )
_snake_case : Tuple = encoded_candidates.get('attention_mask' )
_snake_case : List[str] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
_snake_case : str = {key: item for key, item in output_data.items() if len(UpperCamelCase ) != 0}
return BatchEncoding(UpperCamelCase , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any]=None ):
'''simple docstring'''
_snake_case : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : int = [self.sep_token_id]
_snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
_snake_case : Optional[Any] = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
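# Usage sketch (an addition, not part of the original file). Names follow the
# upstream `RealmTokenizerFast.batch_encode_candidates` API; in this copy the
# class and method were renamed to `_lowerCAmelCase` / `UpperCamelCase_`.
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["Hello world!", "Nice to meet you!"]], max_length=10, return_tensors="pt")
#   # batch["input_ids"].shape -> (batch_size, num_candidates, max_length)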
| 669 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
lowerCAmelCase_ = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def lowerCamelCase_ ( lowerCAmelCase: List[str] )-> List[str]:
_snake_case : List[str] = {}
with open(_A , 'r' ) as file:
for line_number, line in enumerate(_A ):
_snake_case : int = line.strip()
if line:
_snake_case : Any = line.split()
_snake_case : Any = line_number
_snake_case : List[Any] = words[0]
_snake_case : int = value
return result
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: List[Any] , lowerCAmelCase: Tuple )-> int:
for attribute in key.split('.' ):
_snake_case : Any = getattr(_A , _A )
_snake_case : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_A ):
_snake_case : int = PARAM_MAPPING[full_name.split('.' )[-1]]
_snake_case : Tuple = "param"
if weight_type is not None and weight_type != "param":
_snake_case : Union[str, Any] = getattr(_A , _A ).shape
elif weight_type is not None and weight_type == "param":
_snake_case : Tuple = hf_pointer
for attribute in hf_param_name.split('.' ):
_snake_case : Tuple = getattr(_A , _A )
_snake_case : List[str] = shape_pointer.shape
# let's reduce dimension
_snake_case : List[str] = value[0]
else:
_snake_case : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_snake_case : Dict = value
elif weight_type == "weight_g":
_snake_case : Union[str, Any] = value
elif weight_type == "weight_v":
_snake_case : Union[str, Any] = value
elif weight_type == "bias":
_snake_case : int = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
_snake_case : Any = getattr(_A , _A )
_snake_case : int = value
else:
_snake_case : Dict = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Any , lowerCAmelCase: List[Any] , lowerCAmelCase: List[Any] , lowerCAmelCase: Tuple )-> str:
_snake_case : Union[str, Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_A ):
_snake_case : List[Any] = PARAM_MAPPING[full_name.split('.' )[-1]]
_snake_case : int = "param"
if weight_type is not None and weight_type != "param":
_snake_case : Union[str, Any] = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_snake_case : Union[str, Any] = ".".join([key, hf_param_name] )
else:
_snake_case : str = key
_snake_case : List[str] = value if "lm_head" in full_key else value[0]
lowerCAmelCase_ = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: Any , lowerCAmelCase: Optional[Any]=None , lowerCAmelCase: Tuple=None )-> List[Any]:
_snake_case : int = False
for key, mapped_key in MAPPING.items():
_snake_case : Dict = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_snake_case : Tuple = True
if "*" in mapped_key:
_snake_case : Any = name.split(_A )[0].split('.' )[-2]
_snake_case : Optional[int] = mapped_key.replace('*' , _A )
if "weight_g" in name:
_snake_case : Union[str, Any] = "weight_g"
elif "weight_v" in name:
_snake_case : List[Any] = "weight_v"
elif "bias" in name:
_snake_case : List[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_snake_case : Optional[Any] = "weight"
else:
_snake_case : Tuple = None
if hf_dict is not None:
rename_dict(_A , _A , _A , _A , _A )
else:
set_recursively(_A , _A , _A , _A , _A )
return is_used
return is_used
def lowerCamelCase_ ( lowerCAmelCase: Dict , lowerCAmelCase: int , lowerCAmelCase: int )-> Dict:
_snake_case : List[Any] = []
_snake_case : Union[str, Any] = fairseq_model.state_dict()
_snake_case : Optional[int] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_snake_case : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_A , _A , _A , _A , hf_model.config.feat_extract_norm == 'group' , )
_snake_case : Optional[int] = True
else:
_snake_case : Optional[int] = load_wavaveca_layer(_A , _A , _A )
if not is_used:
unused_weights.append(_A )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Dict , lowerCAmelCase: Optional[Any] , lowerCAmelCase: List[Any] , lowerCAmelCase: Union[str, Any] )-> Optional[Any]:
_snake_case : int = full_name.split('conv_layers.' )[-1]
_snake_case : Optional[Any] = name.split('.' )
_snake_case : Any = int(items[0] )
_snake_case : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_snake_case : Tuple = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_snake_case : Tuple = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_snake_case : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_snake_case : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_A )
@torch.no_grad()
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: Any , lowerCAmelCase: Any=None , lowerCAmelCase: List[Any]=None , lowerCAmelCase: Tuple=True , lowerCAmelCase: List[Any]=False )-> List[str]:
if config_path is not None:
_snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_A )
else:
_snake_case : str = WavaVecaConfig()
if is_seq_class:
_snake_case : str = read_txt_into_dict(_A )
_snake_case : List[Any] = idalabel
_snake_case : List[Any] = WavaVecaForSequenceClassification(_A )
_snake_case : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , )
feature_extractor.save_pretrained(_A )
elif is_finetuned:
if dict_path:
_snake_case : Dict = Dictionary.load(_A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_snake_case : List[Any] = target_dict.pad_index
_snake_case : str = target_dict.bos_index
_snake_case : str = target_dict.eos_index
_snake_case : Tuple = len(target_dict.symbols )
_snake_case : Optional[Any] = os.path.join(_A , 'vocab.json' )
if not os.path.isdir(_A ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_A ) )
return
os.makedirs(_A , exist_ok=_A )
_snake_case : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
_snake_case : str = 0
_snake_case : Dict = 1
with open(_A , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(_A , _A )
_snake_case : Any = WavaVecaCTCTokenizer(
_A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_A , )
_snake_case : Dict = True if config.feat_extract_norm == "layer" else False
_snake_case : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , )
_snake_case : Any = WavaVecaProcessor(feature_extractor=_A , tokenizer=_A )
processor.save_pretrained(_A )
_snake_case : int = WavaVecaForCTC(_A )
else:
_snake_case : Optional[Any] = WavaVecaForPreTraining(_A )
if is_finetuned or is_seq_class:
_snake_case : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_snake_case : Dict = argparse.Namespace(task='audio_pretraining' )
_snake_case : Any = fairseq.tasks.setup_task(_A )
_snake_case : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_A )
_snake_case : List[str] = model[0].eval()
recursively_load_weights(_A , _A , not is_finetuned )
hf_wavavec.save_pretrained(_A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
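# Example invocation (an addition; the script filename and local paths are
# placeholders, while the flags are the ones defined by the argparse block above):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned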
| 705 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Dict , lowerCAmelCase: Union[str, Any] )-> Optional[int]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
_snake_case : Tuple = TOKENIZER_CLASSES
else:
_snake_case : Union[str, Any] = {tokenizer_name: getattr(lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
_snake_case : Dict = TOKENIZER_CLASSES[tokenizer_name]
_snake_case : Optional[Any] = True
if checkpoint_name is None:
_snake_case : Union[str, Any] = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_snake_case : Optional[int] = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
_snake_case : str = tokenizer_class.from_pretrained(lowerCAmelCase , force_download=lowerCAmelCase )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
_snake_case , _snake_case : Tuple = checkpoint.split('/' )
_snake_case : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
elif add_prefix:
_snake_case : Dict = checkpoint
_snake_case : Optional[Any] = dump_path
else:
_snake_case : str = None
_snake_case : Union[str, Any] = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_snake_case : Optional[Any] = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_snake_case : Optional[int] = file_path.split(lowerCAmelCase )[-1][0]
if next_char == "/":
_snake_case : Union[str, Any] = os.path.join(lowerCAmelCase , lowerCAmelCase )
_snake_case : str = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
_snake_case : Optional[int] = tokenizer.save_pretrained(
lowerCAmelCase , legacy_format=lowerCAmelCase , filename_prefix=lowerCAmelCase )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCAmelCase )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
lowerCAmelCase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
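# Example invocation (an addition; the script filename is a placeholder, the
# flags come from the argparse block above, and `BertTokenizer` stands in for
# any key of SLOW_TO_FAST_CONVERTERS):
#
#   python convert_slow_tokenizers_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers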
| 669 | 0 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCAmelCase: Tuple , lowerCAmelCase: Optional[Any] , lowerCAmelCase: Tuple )-> List[Any]:
_snake_case : List[str] = UniSpeechSatForSequenceClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
_snake_case : Tuple = downstream_dict['projector.weight']
_snake_case : Any = downstream_dict['projector.bias']
_snake_case : Dict = downstream_dict['model.post_net.linear.weight']
_snake_case : int = downstream_dict['model.post_net.linear.bias']
return model
def lowerCamelCase_ ( lowerCAmelCase: Optional[int] , lowerCAmelCase: Dict , lowerCAmelCase: Any )-> int:
_snake_case : Dict = UniSpeechSatForAudioFrameClassification.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
_snake_case : List[str] = downstream_dict['model.linear.weight']
_snake_case : Optional[int] = downstream_dict['model.linear.bias']
return model
def lowerCamelCase_ ( lowerCAmelCase: int , lowerCAmelCase: Any , lowerCAmelCase: Union[str, Any] )-> Tuple:
_snake_case : Tuple = UniSpeechSatForXVector.from_pretrained(lowerCamelCase_ , config=lowerCamelCase_ )
_snake_case : Tuple = downstream_dict['connector.weight']
_snake_case : str = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_snake_case : Tuple = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
_snake_case : str = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
_snake_case : List[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
_snake_case : Optional[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
_snake_case : Union[str, Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
_snake_case : Union[str, Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
_snake_case : int = downstream_dict['objective.W']
return model
@torch.no_grad()
def lowerCamelCase_ ( lowerCAmelCase: List[Any] , lowerCAmelCase: List[str] , lowerCAmelCase: Any , lowerCAmelCase: Union[str, Any] )-> Dict:
_snake_case : List[Any] = torch.load(lowerCamelCase_ , map_location='cpu' )
_snake_case : int = checkpoint['Downstream']
_snake_case : Union[str, Any] = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
_snake_case : List[str] = WavaVecaFeatureExtractor.from_pretrained(
lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , do_normalize=lowerCamelCase_ )
_snake_case : Optional[Any] = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
_snake_case : List[str] = convert_classification(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('ForAudioFrameClassification' ):
_snake_case : List[str] = convert_diarization(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
elif arch.endswith('ForXVector' ):
_snake_case : Optional[int] = convert_xvector(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
_snake_case : Optional[int] = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
lowerCAmelCase_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 706 |
def lowerCamelCase_ ( lowerCAmelCase: bytes )-> str:
return "".join([hex(lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(lowerCAmelCase )] )
def lowerCamelCase_ ( lowerCAmelCase: str )-> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCAmelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
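# Sanity-check sketch (an addition, not part of the original module): the two
# helpers above mirror the standard library's Base16 codec, so the stdlib
# round trip should agree with them.
import base64

assert base64.b16encode(b"Hello World!").decode() == "48656C6C6F20576F726C6421"
assert base64.b16decode("48656C6C6F20576F726C6421") == b"Hello World!"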
| 669 | 0 |
import doctest
from collections import deque
import numpy as np
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : str ):
'''simple docstring'''
_snake_case : List[Any] = [2, 1, 2, -1]
_snake_case : str = [1, 2, 3, 4]
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[int] = len(self.first_signal )
_snake_case : List[str] = len(self.second_signal )
_snake_case : List[str] = max(_lowercase , _lowercase )
# create a zero matrix of max_length x max_length
_snake_case : Union[str, Any] = [[0] * max_length for i in range(_lowercase )]
        # fills the smaller signal with zeros to make both signals the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(_lowercase ):
_snake_case : Optional[int] = deque(self.second_signal )
rotated_signal.rotate(_lowercase )
for j, item in enumerate(_lowercase ):
matrix[i][j] += item
# multiply the matrix with the first signal
_snake_case : List[str] = np.matmul(np.transpose(_lowercase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(_lowercase , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
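# Cross-check sketch (an addition, not part of the original module): circular
# convolution can also be computed with the FFT; for the default signals above
# the expected result is [10.0, 10.0, 6.0, 14.0].
_first = np.array([2, 1, 2, -1])
_second = np.array([1, 2, 3, 4])
_via_fft = np.real(np.fft.ifft(np.fft.fft(_first) * np.fft.fft(_second)))
assert [round(x, 2) for x in _via_fft] == [10.0, 10.0, 6.0, 14.0]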
| 707 |
import csv
import tweepy
# Twitter API credentials
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
lowerCAmelCase_ = """"""
def lowerCamelCase_ ( lowerCAmelCase: str )-> None:
# authorize twitter, initialize tweepy
_snake_case : Optional[Any] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
_snake_case : List[Any] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
_snake_case : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_snake_case : List[str] = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
_snake_case : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
_snake_case : Tuple = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
_snake_case : List[str] = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
_snake_case : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , 'w' ) as f:
_snake_case : Any = csv.writer(lowerCAmelCase )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( __A ):
'''simple docstring'''
a_ : Optional[int] =["""image_processor""", """tokenizer"""]
a_ : Dict ="""AutoImageProcessor"""
a_ : Dict ="""AutoTokenizer"""
def __init__( self : Dict , UpperCamelCase : int , UpperCamelCase : List[str] ):
'''simple docstring'''
super().__init__(UpperCamelCase , UpperCamelCase )
_snake_case : Union[str, Any] = self.image_processor
def __call__( self : Dict , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : int=None , UpperCamelCase : str=None , **UpperCamelCase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_snake_case : Optional[int] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
_snake_case : Dict = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
_snake_case : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : List[str] , **UpperCamelCase : int ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , *UpperCamelCase : Any , **UpperCamelCase : str ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
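# Usage sketch (an addition, not part of the original file). The class above is
# a generic image+text processor (renamed here to `_lowerCAmelCase`); a typical
# call, with `image_processor`, `tokenizer`, and `image` as placeholders:
#
#   processor = _lowerCAmelCase(image_processor, tokenizer)
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # inputs carries "input_ids", "attention_mask", and "pixel_values"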
| 708 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(UpperCamelCase ) for k, v in self.__dict__.items()} )
| 669 | 0 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] )-> Union[str, Any]:
if hor == 1_28:
_snake_case : Tuple = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
_snake_case : List[str] = (32, 1_28, 2_56)
_snake_case : List[Any] = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
_snake_case : Optional[Any] = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
_snake_case : List[str] = (32, 64, 1_28, 2_56)
_snake_case : Tuple = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
_snake_case : Optional[int] = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
_snake_case : str = model.state_dict()
_snake_case : List[Any] = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 6_55_36,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
_snake_case : Optional[int] = UNetaDModel(**__lowerCAmelCase )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
_snake_case : Union[str, Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_snake_case : List[Any] = state_dict.pop(__lowerCAmelCase )
hf_value_function.load_state_dict(__lowerCAmelCase )
torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , 'w' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def lowerCamelCase_ ( )-> Optional[int]:
_snake_case : Tuple = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 1_28, 2_56),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 6_55_36,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
_snake_case : Optional[int] = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
_snake_case : Union[str, Any] = model
_snake_case : str = UNetaDModel(**__lowerCAmelCase )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
_snake_case : List[str] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
_snake_case : List[Any] = state_dict.pop(__lowerCAmelCase )
hf_value_function.load_state_dict(__lowerCAmelCase )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 709 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
    class ModelToSave( tf.Module ):
        '''simple docstring'''
        def __init__( self , tokenizer ):
            '''simple docstring'''
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPTaLMHeadModel.from_config(config )
        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
        def serving( self , text ):
            '''simple docstring'''
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized['input_ids'].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0 , tf.int64 )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )['logits']
            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def test_output_equivalence( self ):
        '''simple docstring'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] , return_tensors='tf' )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )
@slow
    def test_graph_mode( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
    def test_saved_model( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                tf.saved_model.save(model , save_path , signatures={'serving_default': model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures['serving_default'](test_inputs )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
    def test_from_config( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
    def test_padding( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            # set a dummy pad token id, for the test to run (GPT-2 has none by default)
            tf_tokenizer.pad_token_id = 12_31_23
            for max_length in [3, 5, 10_24]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs , max_length=max_length )
                out_length = out['input_ids'].numpy().shape[1]
assert out_length == max_length
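# A short usage sketch (assumed, not from the test file): the point of TFGPTaTokenizer
# is that tokenization happens inside the TF graph, so raw strings can be fed straight
# into a compiled function or an exported SavedModel.
# if is_tf_available() and is_keras_nlp_available():
#     tf_tokenizer = TFGPTaTokenizer.from_pretrained("gpt2")
#     tokens = tf_tokenizer(tf.constant(["hello world"]))  # dict of integer tensors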
| 669 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    env_level_str = os.getenv('DATASETS_VERBOSITY' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
                F"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split('.' )[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def get_logger(name: Optional[str] = None ) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )
def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int ) -> None:
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info():
    return set_verbosity(INFO )
def set_verbosity_warning():
    return set_verbosity(WARNING )
def set_verbosity_debug():
    return set_verbosity(DEBUG )
def set_verbosity_error():
    return set_verbosity(ERROR )
def disable_propagation() -> None:
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        '''simple docstring'''
        self._iterator = args[0] if args else None
    def __iter__( self ):
        '''simple docstring'''
        return iter(self._iterator )
    def __getattr__( self , _ ):
        '''simple docstring'''
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        '''simple docstring'''
        return self
    def __exit__( self , type_ , value , traceback ):
        '''simple docstring'''
        return
_tqdm_active = True
class _tqdm_cls:
    '''simple docstring'''
    def __call__( self , *args , disable=False , **kwargs ):
        '''simple docstring'''
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        '''simple docstring'''
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        '''simple docstring'''
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
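# A minimal usage sketch (assumed, not part of the module): consumers typically grab a
# namespaced logger and tune the library-wide verbosity and progress bars.
# logger = get_logger(__name__)
# set_verbosity(log_levels['info'])   # or equivalently set_verbosity_info()
# disable_progress_bar()              # silences the tqdm wrapper defined above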
| 710 |
def hamming(n_element: int ) -> list:
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('n_element should be a positive number' )
        raise my_error
    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
    print("""-----------------------------------------------------""")
    print(F"""The list with nth numbers is: {hamming_numbers}""")
    print("""-----------------------------------------------------""")
| 669 | 0 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """OwlViTImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , query_images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        '''simple docstring'''
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.' )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('Target return tensor type could not be returned' )
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
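# A short usage sketch (assumed; the checkpoint id is illustrative): text queries and
# images go through the one processor defined above.
# from transformers import OwlViTProcessor
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")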
| 711 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file , repo_path='shi-labs/oneformer_demo' ):
    with open(hf_hub_download(repo_path , class_info_file , repo_type='dataset' ) , 'r' ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['name']
        class_names.append(info['name'] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata['thing_ids'] = thing_ids
    metadata['class_names'] = class_names
    return metadata
class OneFormerImageProcessorTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , size=None , do_resize=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , num_labels=10 , do_reduce_labels=False , ignore_index=2_55 , repo_path="shi-labs/oneformer_demo" , class_info_file="ade20k_panoptic.json" , num_text=10 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {'shortest_edge': 32, 'longest_edge': 13_33} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file , repo_path )
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs( self ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp( self ):
        '''simple docstring'''
        self.image_processing_tester = OneFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processing_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(UpperCamelCase , 'do_reduce_labels' ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
_snake_case : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : List[Any] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : int = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
_snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : Optional[int] = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : Optional[int] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
_snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : int = self.image_processing_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
_snake_case : List[str] = image_processor(
UpperCamelCase , ['semantic'] * len(UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs( self , with_segmentation_maps=False , is_instance_map=False , segmentation_type="np" ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(high ) ) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded ) )
            annotations = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation ) for annotation in annotations]
        inputs = image_processor(
            image_inputs , ['semantic'] * len(image_inputs ) , annotations , return_tensors='pt' , instance_id_to_semantic_id=instance_id_to_semantic_id , pad_and_return_pixel_mask=True , )
        return inputs
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
pass
    def test_call_with_segmentation_maps( self ):
        '''simple docstring'''
        def common(is_instance_map=False , segmentation_type=None ):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
            mask_labels = inputs['mask_labels']
            class_labels = inputs['class_labels']
            pixel_values = inputs['pixel_values']
            text_inputs = inputs['text_inputs']
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels , class_labels , text_inputs ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(text_input ) , self.image_processing_tester.num_text )
        common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type='pil' )
        common(is_instance_map=True , segmentation_type='pil' )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Union[str, Any] = np.zeros((20, 50) )
_snake_case : int = 1
_snake_case : int = 1
_snake_case : Optional[Any] = 1
_snake_case : List[Any] = binary_mask_to_rle(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
        _snake_case : Any = image_processor.post_process_semantic_segmentation(UpperCamelCase )
self.assertEqual(len(UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_snake_case : Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _snake_case : Union[str, Any] = image_processor.post_process_semantic_segmentation(UpperCamelCase , target_sizes=UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : int = image_processor.post_process_instance_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
_snake_case : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_snake_case : Any = image_processor.post_process_panoptic_segmentation(UpperCamelCase , threshold=0 )
self.assertTrue(len(UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
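# A compact run-length-encoding sketch (illustrative; one common formulation of the
# `binary_mask_to_rle` idea tested above, not necessarily the library's exact code):
# the RLE alternates run starts and run lengths over the flattened mask.
# import numpy as np
# def rle_sketch(mask):
#     pixels = np.concatenate([[0], mask.flatten(), [0]])
#     runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
#     runs[1::2] -= runs[::2]
#     return list(runs)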
| 669 | 0 |
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray , variance: float ) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice(img: np.ndarray , x: int , y: int , kernel_size: int ) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int , spatial_variance: float ) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )
def bilateral_filter(img: np.ndarray , spatial_variance: float , intensity_variance: float , kernel_size: int , ) -> np.ndarray:
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x , size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
    return imga
def parse_args(args: list ) -> tuple:
    filename = args[1] if args[1:] else """../image_data/lena.jpg"""
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("""input image""", img)
    out = img / 255
    out = out.astype("""float32""")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("""output image""", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
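# Illustrative check (not part of the original script): the spatial kernel built by
# get_gauss_kernel is symmetric and peaks at the window centre, which is what makes
# the filter edge-preserving once it is multiplied by the intensity gaussian.
# ker = get_gauss_kernel(5, 1.0)
# assert np.allclose(ker, ker.T) and ker.argmax() == 12  # flat index of the 5x5 centre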
| 712 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase_ ( )-> Tuple:
_snake_case : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_snake_case : int = get_sagemaker_input()
else:
_snake_case : Any = get_cluster_input()
return config
def lowerCamelCase_ ( lowerCAmelCase: str=None )-> Any:
if subparsers is not None:
_snake_case : List[Any] = subparsers.add_parser('config' , description=lowerCAmelCase )
else:
_snake_case : Dict = argparse.ArgumentParser('Accelerate config command' , description=lowerCAmelCase )
parser.add_argument(
'--config_file' , default=lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase )
return parser
def lowerCamelCase_ ( lowerCAmelCase: Any )-> Any:
_snake_case : Dict = get_user_input()
if args.config_file is not None:
_snake_case : List[str] = args.config_file
else:
if not os.path.isdir(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
_snake_case : Union[str, Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(lowerCAmelCase )
else:
config.to_yaml_file(lowerCAmelCase )
print(F"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase_ ( )-> Dict:
_snake_case : List[str] = config_command_parser()
_snake_case : str = parser.parse_args()
config_command(lowerCAmelCase )
if __name__ == "__main__":
main()
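# Typical invocations (assumed from the argument parser above, not stated in the file):
# $ accelerate config                                 # interactive prompts, saves default_config.yaml
# $ accelerate config --config_file ./my_config.yaml  # write to an explicit path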
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase_ = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
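# A stripped-down sketch (illustrative only) of the lazy-import pattern used above:
# attribute access triggers the real import, so heavyweight submodules stay unloaded
# until a symbol is actually needed.
# import importlib, types
# class LazyModuleSketch(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._import_structure = import_structure
#     def __getattr__(self, attr):
#         for module, symbols in self._import_structure.items():
#             if attr in symbols:
#                 return getattr(importlib.import_module(module), attr)
#         raise AttributeError(attr)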
| 713 |
# Function to print upper half of diamond (pyramid)
def floyd(n ):
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(' ' , end='' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('* ' , end='' )
        print()
# Function to print lower half of diamond (pyramid)
def reverse_floyd(n ):
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print('* ' , end='' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(' ' , end='' )
def pretty_print(n ):
    if n <= 0:
        print(' ... .... nothing printing :(' )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
if __name__ == "__main__":
    print(r"""| /\ | |- | |- |--| |\ /| |-""")
    print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        K = int(input("""press 0 to exit... and 1 to continue..."""))
    print("""Good Bye...""")
| 669 | 0 |
def is_palindrome(head ):
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head ):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head ):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
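# The functions above assume a singly linked node type with `val` and `next`; a minimal
# stand-in (assumed, since the original file does not define one) plus a quick check.
class ListNode:
    def __init__(self, val ):
        self.val = val
        self.next = None
def _build(values ):
    head = tail = None
    for v in values:
        node = ListNode(v )
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head
# is_palindrome mutates the list (it cuts and reverses the second half), so each check
# below builds a fresh list.
assert is_palindrome(_build([1, 2, 2, 1] ) )
assert is_palindrome_stack(_build([1, 2, 2, 1] ) )
assert is_palindrome_dict(_build([1, 2, 3] ) ) is False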
| 714 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """audio-spectrogram-transformer"""
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=10_24 , num_mel_bins=1_28 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
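# A brief usage sketch (illustrative, not from the original file): the defaults above
# reproduce the base AST configuration shape.
# config = ASTConfig()
# assert config.hidden_size == 7_68 and config.num_mel_bins == 1_28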
| 669 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"""cls_token""": """<s>"""}
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : Optional[int] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_snake_case : List[str] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
_snake_case : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_snake_case : Union[str, Any] = {"""unk_token""": """<unk>"""}
_snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCamelCase ) )
def UpperCamelCase_ ( self : int , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def UpperCamelCase_ ( self : int , **UpperCamelCase : Tuple ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def UpperCamelCase_ ( self : Any , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : Tuple = """lower newer"""
_snake_case : Dict = """lower newer"""
return input_text, output_text
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_snake_case : List[Any] = """lower newer"""
_snake_case : str = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_snake_case : Any = tokenizer.tokenize(_UpperCamelCase ) # , add_prefix_space=True)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
_snake_case : Optional[Any] = tokens + [tokenizer.unk_token]
_snake_case : Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_UpperCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_UpperCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : int = self.tokenizer_class.from_pretrained('roberta-base' )
_snake_case : Optional[Any] = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCamelCase )
_snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCamelCase )
_snake_case : Dict = tokenizer.encode(
'sequence builders' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
_snake_case : Union[str, Any] = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
_snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
_snake_case : Dict = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : str = self.get_tokenizer()
_snake_case : List[Any] = """Encode this sequence."""
_snake_case : Any = tokenizer.byte_encoder[""" """.encode('utf-8' )[0]]
# Testing encoder arguments
_snake_case : Any = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
_snake_case : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
_snake_case : List[str] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
_snake_case : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
_snake_case : Optional[Any] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
_snake_case : Any = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
# Testing spaces after special tokens
_snake_case : Optional[Any] = """<mask>"""
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )} ) # mask token has a left space
_snake_case : int = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
_snake_case : str = """Encode <mask> sequence"""
_snake_case : Union[str, Any] = """Encode <mask>sequence"""
_snake_case : str = tokenizer.encode(_UpperCamelCase )
_snake_case : Optional[int] = encoded.index(_UpperCamelCase )
_snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
_snake_case : Any = tokenizer.encode(_UpperCamelCase )
_snake_case : Any = encoded.index(_UpperCamelCase )
_snake_case : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
_snake_case : Dict = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
_snake_case : str = """A, <mask> AllenNLP sentence."""
_snake_case : int = tokenizer_r.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
_snake_case : Union[str, Any] = tokenizer_p.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_snake_case : Any = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_snake_case : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
_UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_snake_case : int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : List[str] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_snake_case : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _UpperCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , _UpperCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] , _UpperCamelCase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : int = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_snake_case : str = f"""{text_of_1_token} {text_of_1_token}"""
_snake_case : int = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : List[Any] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
_snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : Optional[Any] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
_snake_case : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : Optional[int] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : Optional[int] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
_snake_case : Union[str, Any] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : List[str] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : int = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
_snake_case : Dict = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
_snake_case : Tuple = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
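# A small illustration (assumed behaviour, mirroring what the offset tests above
# exercise): RoBERTa's byte-level BPE treats a leading space as part of the token,
# so `add_prefix_space` changes both the ids and the reported offsets.
# tok = RobertaTokenizer.from_pretrained("roberta-base")
# tok("hello")["input_ids"]    # no prefix space
# tok(" hello")["input_ids"]   # the 'Ġhello' variant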
| 715 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max: bool = True , max_x: float = math.inf , min_x: float = -math.inf , max_y: float = math.inf , min_y: float = -math.inf , visualization: bool = False , start_temperate: float = 1_00 , rate_of_decrease: float = 0.0_1 , threshold_temp: float = 1 , ) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F"""and 50 > y > - 5 found via simulated annealing: {local_max.score()}"""
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F"""{local_min.score()}"""
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F"""{local_max.score()}"""
    )
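# Note on the acceptance rule used above: a worse neighbor (change < 0 after the
# optional sign flip for minimization) is accepted with probability
# e^(change / current_temp) -- the Metropolis criterion. For example, with
# change = -2 the acceptance probability is about 0.98 at temperature 100 but
# only about 0.14 at temperature 1, so the search gets greedier as it cools.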
| 669 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 1_00
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00,
            beta_schedule='linear',
            beta_start=0.0_00_85,
            beta_end=0.0_12,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='epsilon',
            thresholding=False,
        )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png')
        hint = torch.from_numpy(np.array(hint)).float() / 2_55.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = 'A robot, 4k photo'
        generator = torch.Generator(device='cuda').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='',
        ).to_tuple()
        generator = torch.Generator(device='cuda').manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=1_00,
            output_type='np',
        )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(image, expected_image)
| 716 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 7_68,
        num_embeddings: Optional[int] = 77,
        additional_embeddings: int = 4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""")
        self.embedding_proj = nn.Linear(embedding_proj_dim, time_embed_dim)
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""")
        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""")
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn='gelu',
                    attention_bias=True,
                )
                for d in range(num_layers)
            ])
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""")
        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_00_00.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask', causal_attention_mask, persistent=False)
        # statistics used by `post_process_latents` to un-normalize predicted CLIP latents
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, 'set_processor'):
                processors[f"""{name}.processor"""] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"""{name}.{sub_name}""", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"""A dict of processors was passed, but the number of processors {len(processor)} does not match the"""
                f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, 'set_processor'):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"""{name}.processor"""))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"""{name}.{sub_name}""", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set')
        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
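    # `post_process_latents` below undoes the normalization applied to the CLIP image
    # embeddings when the prior was trained: predicted latents are scaled back by
    # `clip_std` and shifted by `clip_mean` before being handed to the decoder.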
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
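if __name__ == "__main__":
    # Minimal smoke test (an illustrative sketch, not part of the diffusers API):
    # build a tiny prior with arbitrary small sizes and check the output shape.
    _prior = PriorTransformer(
        num_attention_heads=2,
        attention_head_dim=4,
        num_layers=2,
        embedding_dim=8,
        num_embeddings=3,
        additional_embeddings=4,
    )
    _out = _prior(
        hidden_states=torch.randn(1, 8),
        timestep=1,
        proj_embedding=torch.randn(1, 8),
        encoder_hidden_states=torch.randn(1, 3, 8),
    )
    assert _out.predicted_image_embedding.shape == (1, 8)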
| 669 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # a 0-weight edge extends the current frontier, so it goes to the
                # front of the deque; a 1-weight edge goes to the back (0-1 BFS)
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
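# Example (illustrative): the 0-1 BFS prefers the zero-weight edge.
#     g = AdjacencyList(4)
#     g.add_edge(0, 1, 0); g.add_edge(1, 2, 1)
#     g.add_edge(0, 3, 1); g.add_edge(3, 2, 1)
#     g.get_shortest_path(0, 2)  # -> 1  (0 -> 1 costs 0, 1 -> 2 costs 1)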
| 717 |
def catalan_number(number: int) -> int:
    if not isinstance(number, int):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
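# Worked values: catalan_number(1..5) -> 1, 1, 2, 5, 14, i.e. the n-th call returns
# the (n-1)-th Catalan number via the recurrence C_n = C_{n-1} * (4n - 2) / (n + 1).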
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""")
    return _value
_run_slow_tests = parse_flag_from_env("""RUN_SLOW""", default=False)
_run_remote_tests = parse_flag_from_env("""RUN_REMOTE""", default=False)
_run_local_tests = parse_flag_from_env("""RUN_LOCAL""", default=True)
_run_packaged_tests = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
    reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
    reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("""0.3.2"""),
    reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == """win32""",
    reason="""test should not be run on Windows""",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('test requires faiss')(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex')(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch')(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy')(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch')(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow')(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX')(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow')(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers')(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken')(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy')(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip('test requires spacy')(test_case)
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark')(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark')(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow')(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local')(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged')(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote')(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith('test'):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1E-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""")
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', F"""OfflineMock[{url}]"""),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 1_00, 10).tolist() == deepcopy(rng2).integers(0, 1_00, 10).tolist()
def xfail_if_500_502(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:')),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=1_80, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""")

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""")
    return result
def pytest_xdist_worker_id():
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker = re.sub(R'^gw', '', worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 2_95_00
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
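# Illustrative usage (a sketch): run a short command in a subprocess and inspect the
# captured output; `result.stdout` / `result.stderr` are lists of decoded lines.
#     result = execute_subprocess_async([sys.executable, "-c", "print('hello')"])
#     assert result.stdout == ["hello"]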
| 718 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
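# Illustrative usage (requires access to the Hugging Face Hub):
#     tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#     tokenizer("a picture of a cat")["input_ids"]  # [CLS] ... [SEP], token type ids all 0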
| 669 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class _lowerCAmelCase(BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 2_56}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
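# Illustrative usage (a sketch): resize/crop/normalize a PIL image into a model batch.
#     from PIL import Image
#     processor = _lowerCAmelCase()
#     batch = processor(images=Image.new("RGB", (320, 240)), return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])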
| 719 |
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = str(self.value ) + ' '
_snake_case : List[Any] = str(self.left or '' )
_snake_case : int = str(self.right or '' )
return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=',')
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('Unknown command')
    return root


def main() -> None:
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        '- value to erase all nodes with value. \'q\' to quit. ')
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('good by!')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
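    # Example session (illustrative): entering "+1 +3 +5 -3" inserts 1, 3 and 5
    # and then erases every node holding 3, so an inorder traversal prints "1,5,".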
| 669 | 0 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
"""User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] = "dhaka" , lowerCAmelCase: Union[str, Any] = 5 )-> int:
_snake_case : Tuple = min(lowerCAmelCase , 50 ) # Prevent abuse!
_snake_case : Any = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
_snake_case : Optional[int] = requests.get('https://www.google.com/search' , params=lowerCAmelCase , headers=lowerCAmelCase )
_snake_case : List[Any] = BeautifulSoup(html.text , 'html.parser' )
_snake_case : Optional[Any] = ''.join(
re.findall(R'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
_snake_case : List[str] = json.dumps(lowerCAmelCase )
_snake_case : int = json.loads(lowerCAmelCase )
_snake_case : List[str] = re.findall(
R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , lowerCAmelCase , )
if not matched_google_image_data:
return 0
_snake_case : Optional[int] = re.sub(
R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(lowerCAmelCase ) , )
_snake_case : Union[str, Any] = re.findall(
R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , lowerCAmelCase , )
for index, fixed_full_res_image in enumerate(lowerCAmelCase ):
if index >= max_images:
return index
_snake_case : List[Any] = bytes(lowerCAmelCase , 'ascii' ).decode(
'unicode-escape' )
_snake_case : Any = bytes(lowerCAmelCase , 'ascii' ).decode(
'unicode-escape' )
_snake_case : List[str] = urllib.request.build_opener()
_snake_case : List[Any] = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(lowerCAmelCase )
_snake_case : Dict = F"""query_{query.replace(' ' , '_' )}"""
if not os.path.exists(lowerCAmelCase ):
os.makedirs(lowerCAmelCase )
urllib.request.urlretrieve( # noqa: S310
lowerCAmelCase , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print("""Please provide a search term.""")
raise
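# Example (illustrative): running this script as `python <script>.py "dhaka"`
# downloads up to 5 images into a `query_dhaka/` directory.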
| 720 |
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
if __name__ == "__main__":
print(F"""{solution() = }""")
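    # For the 1000-digit number above this prints 23514624000, the greatest product
    # of thirteen adjacent digits (Project Euler problem 8).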
| 669 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config( config_path , display=False ):
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device , conf_path=None , ckpt_path=None ):
    if conf_path is None:
        conf_path = """./model_checkpoints/vqgan_only.yaml"""
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = """./model_checkpoints/vqgan_only.pt"""
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["""state_dict"""]
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x , model ):
    z , _ , _ = model.encode(x )
    print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string , reload=False ):
    module , cls = string.rsplit('.' , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config( config ):
    if "target" not in config:
        raise KeyError('Expected key `target` to instantiate.' )
    return get_obj_from_str(config['target'] )(**config.get('params' , {} ) )
def load_model_from_config( config , sd , gpu=True , eval_mode=True ):
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config , ckpt , gpu , eval_mode ):
    if ckpt:
        pl_sd = torch.load(ckpt , map_location='cpu' )
        global_step = pl_sd["""global_step"""]
        print(f"""loaded model from global step {global_step}.""" )
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd['state_dict'] , gpu=gpu , eval_mode=eval_mode )["""model"""]
    return model, global_step
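# Hedged usage sketch: the default checkpoint locations assumed by load_vqgan
# (./model_checkpoints/vqgan_only.{yaml,pt}) must exist for this to run.
if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    vqgan = load_vqgan(device )
    batch = torch.randn(1 , 3 , 2_56 , 2_56 , device=device )  # dummy image batch
    reconstruction = reconstruct_with_vqgan(batch , vqgan )
    print(reconstruction.shape )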
| 721 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset( )-> Dataset:
_snake_case : List[str] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
    _snake_case : Optional[Any] = Dataset.from_dict(_snake_case )
    return _snake_case
class _lowerCAmelCase ( TestCase ):
    '''simple docstring'''
    def test_make_duplicate_clusters( self ):
        '''simple docstring'''
        _snake_case : Union[str, Any] = get_dataset()
        duplicate_clusters = make_duplicate_clusters(_snake_case , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
    def test_deduplicate_dataset( self ):
        '''simple docstring'''
        _snake_case : List[str] = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(_snake_case )
        self.assertEqual(len(ds_filter ) , 2 )
        print(ds_filter )
        self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , True )
| 669 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
'''simple docstring'''
super().setUp()
_snake_case : Union[str, Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer( self ):
        '''simple docstring'''
        return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
    @cached_property
    def default_tokenizer_fast( self ):
        '''simple docstring'''
        return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
    @require_torch
    def test_prepare_batch( self ):
        '''simple docstring'''
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens ) , padding=True , return_tensors='pt' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result )
    @require_torch
    def test_prepare_batch_empty_target_text( self ):
        '''simple docstring'''
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors='pt' )
            self.assertIn('input_ids' , batch )
            self.assertIn('attention_mask' , batch )
            self.assertNotIn('labels' , batch )
            self.assertNotIn('decoder_attention_mask' , batch )
    @require_torch
    def test_tokenizer_as_target_length( self ):
        '''simple docstring'''
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding='max_length' , return_tensors='pt' )
            self.assertEqual(32 , targets['input_ids'].shape[1] )
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen( self ):
        '''simple docstring'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['I am a small frog' * 10_24, 'I am a small frog'] , padding=True , truncation=True , return_tensors='pt' )
            self.assertIsInstance(batch , BatchEncoding )
            self.assertEqual(batch.input_ids.shape , (2, 51_22) )
    @require_torch
    def test_special_tokens( self ):
        '''simple docstring'''
        src_text = ['A long paragraph for summarization.']
        tgt_text = [
            'Summary of the text.',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , return_tensors='pt' )
            targets = tokenizer(text_target=tgt_text , return_tensors='pt' )
            input_ids = inputs['input_ids']
            labels = targets['input_ids']
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    @require_torch
    def test_global_attention_mask( self ):
        '''simple docstring'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            text = ['Summary of the text.', 'Another summary.']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(text , padding=False )
            encoded_output['global_attention_mask'] = [[0] * len(x ) for x in encoded_output['input_ids']]
            outputs = tokenizer.pad(encoded_output )
            self.assertSequenceEqual(outputs['global_attention_mask'] , expected_global_attention_mask )
    def test_pretokenized_inputs( self ):
        '''simple docstring'''
        pass
    def test_embeded_special_tokens( self ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    tokens_r_str , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 700 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCAmelCase ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
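# Hedged usage sketch: the checkpoint names below are assumptions for
# illustration; any CLIP image processor plus XLM-R tokenizer pair works.
if __name__ == "__main__":
    from transformers import CLIPImageProcessor, XLMRobertaTokenizer
    processor = _lowerCAmelCase(
        image_processor=CLIPImageProcessor.from_pretrained('openai/clip-vit-base-patch32' ) ,
        tokenizer=XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' ) , )
    print(processor.model_input_names )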
| 669 | 0 |
'''simple docstring'''
def lowerCamelCase_ ( lowerCAmelCase: str )-> bool:
    bitmap = 0
    for ch in lowerCAmelCase:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
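# Quick self-checks (hedged examples): the function reports whether every
# character in the string is unique.
assert lowerCamelCase_('abcdef' ) is True
assert lowerCamelCase_('abcdea' ) is False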
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
lowerCAmelCase_ = """http://www.mocksite.com/file1.txt"""
lowerCAmelCase_ = """\"text\": [\"foo\", \"foo\"]"""
lowerCAmelCase_ = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
    '''simple docstring'''
    status_code = 200
    headers = {"""Content-Length""": """100"""}
    cookies = {}
    def iter_content( self , **kwargs ):
        '''simple docstring'''
        return [bytes(CONTENT , 'utf-8' )]
def mock_request( *args , **kwargs ):
    return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def test_download_manager_download( urls_type , tmp_path , monkeypatch ):
    import requests
    monkeypatch.setattr(requests , 'request' , mock_request )
    url = URL
    if issubclass(urls_type , str ):
        urls = url
    elif issubclass(urls_type , list ):
        urls = [url]
    elif issubclass(urls_type , dict ):
        urls = {'train': url}
    dataset_name = 'dummy'
    cache_subdir = 'downloads'
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root , cache_subdir ) , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls , str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls , dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths , input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('.json' )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def test_download_manager_extract( paths_type , xz_file , text_file ):
    filename = str(xz_file )
    if issubclass(paths_type , str ):
        paths = filename
    elif issubclass(paths_type , list ):
        paths = [filename]
    elif issubclass(paths_type , dict ):
        paths = {'train': filename}
    dataset_name = 'dummy'
    cache_dir = xz_file.parent
    extracted_subdir = 'extracted'
    download_config = DownloadConfig(
        cache_dir=cache_dir , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    extracted_paths = dl_manager.extract(paths )
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths , str ):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths , dict ):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths , input_paths ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path )
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path , etag=None )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl( path , file ):
    assert path.endswith('.jsonl' )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode('utf-8' ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def test_iter_archive_path( archive_jsonl , request ):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) , start=1 ):
        _test_jsonl(path , file )
    assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def test_iter_archive_file( archive_nested_jsonl , request ):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) , start=1 ):
            _test_jsonl(subpath , subfile )
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files( data_dir_with_hidden_files ):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files ) , start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 669 | 0 |
values = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
def lowerCamelCase_ ( lowerCAmelCase: float )-> str:
    assert type(lowerCAmelCase ) in (int, float) and lowerCAmelCase == int(lowerCAmelCase )
    decimal = int(lowerCAmelCase )
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal , remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
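# Spot checks (hedged examples) against Python's built-in hex():
assert lowerCamelCase_(5 ) == '0x5'
assert lowerCamelCase_(26 ) == '0x1a'
assert lowerCamelCase_(-2_56 ) == '-0x100'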
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type ="""roberta"""
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
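# Hedged example: default construction mirrors the roberta-base architecture.
if __name__ == "__main__":
    config = RobertaConfig()
    print(config.hidden_size , config.num_hidden_layers )  # 768 12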
| 669 | 0 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("""KEY""")
VAL = TypeVar("""VAL""")
@dataclass(frozen=True , slots=True )
class _Item( Generic[KEY, VAL] ):
    '''simple docstring'''
    key : KEY
    val : VAL
class _DeletedItem( _Item ):
    '''simple docstring'''
    def __init__( self ):
        '''simple docstring'''
        super().__init__(None , None )
    def __bool__( self ):
        '''simple docstring'''
        return False
_deleted = _DeletedItem()
class HashMap( MutableMapping[KEY, VAL] ):
    '''simple docstring'''
    def __init__( self , initial_block_size : int = 8 , capacity_factor : float = 0.75 ):
        '''simple docstring'''
        self._initial_block_size = initial_block_size
        self._buckets : list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self , key : KEY ):
        '''simple docstring'''
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self , ind : int ):
        '''simple docstring'''
        return (ind + 1) % len(self._buckets )
    def _try_set( self , ind : int , key : KEY , val : VAL ):
        '''simple docstring'''
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False
    def _is_full( self ):
        '''simple docstring'''
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self ):
        '''simple docstring'''
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self , new_size : int ):
        '''simple docstring'''
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )
    def _size_up( self ):
        '''simple docstring'''
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self ):
        '''simple docstring'''
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self , key : KEY ):
        '''simple docstring'''
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self , key : KEY , val : VAL ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self , key : KEY , val : VAL ):
        '''simple docstring'''
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self , key : KEY ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self , key : KEY ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
    def __len__( self ):
        '''simple docstring'''
        return self._len
    def __iter__( self ):
        '''simple docstring'''
        yield from (item.key for item in self._buckets if item)
    def __repr__( self ):
        '''simple docstring'''
        val_string = " ,".join(
            f"""{item.key}: {item.val}""" for item in self._buckets if item )
        return f"""HashMap({val_string})"""
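# Hedged usage demo of the open-addressing map, mirroring dict semantics:
if __name__ == "__main__":
    hash_map = HashMap()
    hash_map['key'] = 'value'
    hash_map[42] = 'answer'
    assert hash_map['key'] == 'value' and len(hash_map ) == 2
    del hash_map[42]
    print(hash_map )  # HashMap(key: value)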
| 703 |
from random import randint, random
def construct_highway( number_of_cells: int , frequency: int , initial_speed: int , random_frequency: bool = False , random_speed: bool = False , max_speed: int = 5 , )-> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed , 0 )
    while i < number_of_cells:
        highway[0][i] = (
            randint(0 , max_speed ) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1 , max_speed * 2 ) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway
def get_distance( highway_now: list , car_index: int )-> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells ) ):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now , -1 )
def update( highway_now: list , probability: float , max_speed: int )-> list:
    number_of_cells = len(highway_now )
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells ):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1 , max_speed )
            # Number of empty cell before the next car
            dn = get_distance(highway_now , car_index ) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index] , dn )
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1 , 0 )
    return next_highway
def simulate( highway: list , number_of_update: int , probability: float , max_speed: int )-> list:
    number_of_cells = len(highway[0] )
    for i in range(number_of_update ):
        next_speeds_calculated = update(highway[i] , probability , max_speed )
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells ):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds )
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
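    # Hedged demo: two update steps on a 6-cell loop with a car every 3 cells,
    # zero slowdown probability so the run is deterministic.
    demo_highway = construct_highway(6 , 3 , 0 )
    print(simulate(demo_highway , 2 , 0.0 , 2 ) )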
| 669 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file: str , eval_file: str , test_file: str , tokenizer: PreTrainedTokenizer , label_column_id: int , max_seq_length: Optional[int] = None , ):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    label2id = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='max_length' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='max_length' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    label_column_id : int = field(metadata={"""help""": """Which column contains the label"""} )
    train_file : str = field(default=None , metadata={"""help""": """The path of the training file"""} )
    dev_file : Optional[str] = field(default=None , metadata={"""help""": """The path of the development file"""} )
    test_file : Optional[str] = field(default=None , metadata={"""help""": """The path of the test file"""} )
    max_seq_length : int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path : str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name : Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    use_fast : bool = field(default=False , metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir : Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset , eval_dataset , test_ds , label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        with open(output_eval_file , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key, value in result.items():
                logger.info(F""" {key} = {value}""" )
                writer.write(F"""{key} = {value}\n""" )
            results.update(result )
    return results
if __name__ == "__main__":
main()
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def batch_encode_candidates( self , text , **kwargs ):
        '''simple docstring'''
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop('text_pair' , None )
        return_tensors = kwargs.pop('return_tensors' , None )
        output_data = {
            'input_ids': [],
            'attention_mask': [],
            'token_type_ids': [],
        }
        for idx, candidate_text in enumerate(batch_text ):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
            encoded_input_ids = encoded_candidates.get('input_ids' )
            encoded_attention_mask = encoded_candidates.get('attention_mask' )
            encoded_token_type_ids = encoded_candidates.get('token_type_ids' )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids )
        output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
        return BatchEncoding(output_data , tensor_type=return_tensors )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 669 | 0 |
def solution( lowerCAmelCase: int = 1_00 )-> int:
    sum_of_squares = lowerCAmelCase * (lowerCAmelCase + 1) * (2 * lowerCAmelCase + 1) / 6
    square_of_sum = (lowerCAmelCase * (lowerCAmelCase + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
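# Spot check (hedged example): for n = 10 the difference is 3025 - 385 = 2640.
assert solution(10 ) == 26_40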
if __name__ == "__main__":
print(F"""{solution() = }""")
| 705 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name , checkpoint_name , dump_path , force_download ):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
    logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
        for checkpoint in checkpoint_names:
            logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F"""=> File names {file_names}""" )
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 0 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest( nn.Module ):
    '''simple docstring'''
    def __init__( self ):
        '''simple docstring'''
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        '''simple docstring'''
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class OffloadTester( unittest.TestCase ):
    '''simple docstring'''
    def test_offload_state_dict( self ):
        '''simple docstring'''
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict() )
            index_file = os.path.join(tmp_dir , 'index.json' )
            self.assertTrue(os.path.isfile(index_file ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , f"""{key}.dat""" )
                self.assertTrue(os.path.isfile(weight_file ) )
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight( self ):
        '''simple docstring'''
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype )
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , 'weight' , tmp_dir , {} )
                weight_file = os.path.join(tmp_dir , 'weight.dat' )
                self.assertTrue(os.path.isfile(weight_file ) )
                self.assertDictEqual(index , {'weight': {'shape': [2, 3], 'dtype': str(dtype ).split('.' )[1]}} )
                new_weight = load_offloaded_weight(weight_file , index['weight'] )
                self.assertTrue(torch.equal(weight , new_weight ) )
    def test_offload_weights_loader( self ):
        '''simple docstring'''
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if """linear2""" not in k}
        disk_part = {k: v for k, v in state_dict.items() if """linear2""" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        cpu_part = {k: v for k, v in state_dict.items() if """weight""" in k}
        disk_part = {k: v for k, v in state_dict.items() if """weight""" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , state_dict )
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
    def test_extract_submodules_state_dict( self ):
        '''simple docstring'''
        state_dict = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
        extracted = extract_submodules_state_dict(state_dict , ['a.1', 'a.2'] )
        self.assertDictEqual(extracted , {'a.1': 0, 'a.2': 2} )
        state_dict = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
        extracted = extract_submodules_state_dict(state_dict , ['a.1', 'a.2'] )
        self.assertDictEqual(extracted , {'a.1.a': 0, 'a.2.a': 2} )
| 706 |
def base16_encode( data: bytes )-> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data: str )-> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
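# Round-trip sanity check (hedged example):
assert base16_encode(b'Hello World!' ) == '48656C6C6F20576F726C6421'
assert base16_decode(base16_encode(b'Hello World!' ) ) == b'Hello World!'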
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ):
    if "model" in orig_key:
        orig_key = orig_key.replace('model.' , '' )
    if "norm1" in orig_key:
        orig_key = orig_key.replace('norm1' , 'attention.output.LayerNorm' )
    if "norm2" in orig_key:
        orig_key = orig_key.replace('norm2' , 'output.LayerNorm' )
    if "norm" in orig_key:
        orig_key = orig_key.replace('norm' , 'LayerNorm' )
    if "transformer" in orig_key:
        layer_num = orig_key.split('.' )[0].split('_' )[-1]
        orig_key = orig_key.replace(F"""transformer_{layer_num}""" , F"""encoder.layer.{layer_num}""" )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('mha.attn' , 'attention.self' )
    if "mha" in orig_key:
        orig_key = orig_key.replace('mha' , 'attention' )
    if "W_q" in orig_key:
        orig_key = orig_key.replace('W_q' , 'self.query' )
    if "W_k" in orig_key:
        orig_key = orig_key.replace('W_k' , 'self.key' )
    if "W_v" in orig_key:
        orig_key = orig_key.replace('W_v' , 'self.value' )
    if "ff1" in orig_key:
        orig_key = orig_key.replace('ff1' , 'intermediate.dense' )
    if "ff2" in orig_key:
        orig_key = orig_key.replace('ff2' , 'output.dense' )
    if "ff" in orig_key:
        orig_key = orig_key.replace('ff' , 'output.dense' )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' )
    if "mlm" in orig_key:
        orig_key = orig_key.replace('mlm' , 'cls.predictions.transform' )
    if "cls" not in orig_key:
        orig_key = 'yoso.' + orig_key
    return orig_key
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    orig_state_dict['cls.predictions.bias'] = orig_state_dict['cls.predictions.decoder.bias']
    orig_state_dict['yoso.embeddings.position_ids'] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    orig_state_dict = torch.load(checkpoint_path , map_location='cpu' )['model_state_dict']
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 707 |
import csv
import tweepy
# Twitter API credentials
consumer_key = """"""
consumer_secret = """"""
access_key = """"""
access_secret = """"""
def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv; newline='' avoids blank rows on Windows
    with open(f"new_{screen_name}_tweets.csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
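
# A hedged sketch generalizing the cursor pattern above: `fetch_page` is a hypothetical
# stand-in for any API call that accepts a `max_id`-style exclusive upper bound.
def paginate(fetch_page, page_size=200):
    page = fetch_page(count=page_size)
    while page:
        yield from page
        page = fetch_page(count=page_size, max_id=page[-1].id - 1)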
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 669 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels', which is the name the models of the
    # transformers library expect
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
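
# Quick usage sketch (illustrative; assumes an Accelerator has been constructed):
# accelerator = Accelerator()
# train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)
# batch = next(iter(train_dl))  # dict with input_ids, attention_mask, labels tensors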
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main() -> None:
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
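
# Typical invocation (illustrative): the script is meant to be launched via `accelerate launch`,
# optionally with a DeepSpeed config supplying the optimizer/scheduler entries checked above:
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./checkpoints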
| 708 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
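
# Usage sketch (illustrative): the method above builds an independent deep copy, so
# mutating nested dict/list fields on the copy leaves the original untouched.
# cfg = _lowerCAmelCase()
# cfg_copy = cfg.UpperCamelCase_()
# assert cfg_copy is not cfg and cfg_copy.__dict__ == cfg.__dict__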
| 669 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
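
# A minimal sketch (an illustration, not transformers' actual _LazyModule) of the
# lazy-import idea used above: defer the heavy submodule import until an attribute
# on the proxy is first accessed.
import importlib

class _LazyAttrProxy:
    def __init__(self, module_name: str):
        self._name = module_name
        self._module = None

    def __getattr__(self, item):
        if self._module is None:  # the real import happens only on first attribute access
            self._module = importlib.import_module(self._name)
        return getattr(self._module, item)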
| 709 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
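
    # Illustrative usage (assumes TF and a GPT-2 checkpoint are available): because the
    # input signature is string-typed, the exported SavedModel tokenizes raw text end-to-end.
    # model = ModelToSave(tokenizer=TFGPTaTokenizer.from_pretrained("gpt2"))
    # logits = model.serving(tf.constant(["hello world"]))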
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
@slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # required for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
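
# Note (illustrative): the assertion above relies on the TF tokenizer padding or
# truncating every row to exactly `max_length` ids when that argument is set,
# e.g. max_length=3 yields out["input_ids"].shape == (1, 3).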
| 669 | 0 |