| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
'''InstructBLIP model configuration.'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1_408,
        intermediate_size=6_144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1_408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
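# Editor's usage sketch (hedged; added for illustration, not part of the
# original module). Building a composite config from default sub-configs:
#   vision = InstructBlipVisionConfig()
#   qformer = InstructBlipQFormerConfig()
#   text = CONFIG_MAPPING["opt"]()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)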
'''Image-to-text generation pipeline.'''
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt` is None). In batch model,
        # the pipeline will group them into a list of `None`, which fails `generate`. Reset it back to `None` here.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
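# Editor's usage sketch (hedged; the checkpoint id is illustrative):
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("path/or/url/to/image.png")  # -> [{"generated_text": "..."}]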
'''Project Euler 55: count Lychrel candidates below a limit.'''


def is_palindrome(n):
    return str(n) == str(n)[::-1]


def sum_reverse(n):
    return int(n) + int(str(n)[::-1])


def solution(limit=10_000):
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
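# Editor's sanity check (added; hedged): 47 + 74 = 121 is a palindrome, so 47
# resolves after one reverse-and-add step and is not a Lychrel candidate.
if __name__ == "__main__":
    assert sum_reverse(47) == 121
    assert is_palindrome(sum_reverse(47))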
'''Pancake sort: sort a list using only prefix reversals.'''


def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
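# Editor's sanity check (added; hedged): each pass flips the current maximum to
# the front, then flips it into its final slot at index cur - 1.
if __name__ == "__main__":
    assert pancake_sort([3, 1, 2]) == [1, 2, 3]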
'''Documentation notebook conversion defaults.'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
'''Project Euler 42: count triangle words in words.txt.'''
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(wordfile_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
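# Editor's sanity check (added; hedged): "SKY" scores 19 + 11 + 25 = 55, the
# 10th triangular number, so it counts as a triangle word.
if __name__ == "__main__":
    assert sum(ord(x) - 64 for x in "SKY") == 55
    assert 55 in TRIANGULAR_NUMBERS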
'''Project Euler 31: count the ways to make a total from UK coins.'''


def one_pence():
    return 1


def two_pence(x):
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x):
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x):
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x):
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x):
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x):
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x):
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n=200):
    return two_pound(n)


if __name__ == "__main__":
    print(solution(int(input().strip())))
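# Editor's sanity check (added; hedged): with only 1p and 2p coins there are
# three ways to make 4p (1+1+1+1, 1+1+2, 2+2), which two_pence(4) reproduces.
if __name__ == "__main__":
    assert two_pence(4) == 3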
'''Count islands in a boolean grid using 8-directional DFS.'''


class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Check all 8 elements surrounding the (i, j) element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Mark this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
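# Editor's usage sketch (added; hedged): under 8-way connectivity the three
# touching 1s form one island and the isolated 1 forms a second.
if __name__ == "__main__":
    grid = [[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]
    assert Graph(3, 4, grid).count_islands() == 2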
'''Consolidate a RAG checkpoint from a question-encoder and a generator checkpoint.'''
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
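# Editor's usage sketch (hedged; the script name and model ids are illustrative):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --dest ./rag-new \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base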
'''Flax RoFormer model tests.'''
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask

if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''MarkupLM model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
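# Editor's sanity sketch (added; hedged): the defaults above describe the base
# checkpoint, so a bare config should reproduce them.
#   config = MarkupLMConfig()
#   assert config.max_depth == 50 and config.xpath_unit_hidden_size == 32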
'''VQ-VAE model with a vector-quantized latent space.'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQModel encoding method, holding the latent codes."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
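# Editor's usage sketch (hedged; shapes assume the default config above, which
# keeps 32x32 inputs at full resolution with a single encoder block):
#   vq = VQModel()
#   x = torch.randn(1, 3, 32, 32)
#   out = vq(x).sample  # encode -> quantize -> decode round trip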
'''Reformer import structure.'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''Fine-tune a code model to classify algorithmic complexity (codeparrot/codecomplex).'''
import argparse
from copy import deepcopy

import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    set_seed,
)


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """Evaluates on the training set at the end of each epoch."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1_024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()


if __name__ == "__main__":
    main()
'''DisjunctiveConstraint tests.'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch

if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # `token_ids` must be a list of lists of integers, initialized only from integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One constraint may not be a complete subset of another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
'''XLM-RoBERTa import structure.'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Any=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : Optional[int]=37 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[str]="None" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = relative_attention
__SCREAMING_SNAKE_CASE = position_biased_input
__SCREAMING_SNAKE_CASE = pos_att_type
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = 300
return config
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DebertaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DebertaForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
| 331 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = 42
class lowerCAmelCase__ ( a , a ):
"""simple docstring"""
lowerCAmelCase__ = True
@register_to_config
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : Tuple[str] = ("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE : Tuple[str] = ("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE : Tuple[int] = (64,) , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : str = "silu" , __SCREAMING_SNAKE_CASE : int = 4 , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : float = 0.18215 , ) -> List[Any]:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
__SCREAMING_SNAKE_CASE = Encoder(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , down_block_types=__SCREAMING_SNAKE_CASE , block_out_channels=__SCREAMING_SNAKE_CASE , layers_per_block=__SCREAMING_SNAKE_CASE , act_fn=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , double_z=__SCREAMING_SNAKE_CASE , )
# pass init params to Decoder
__SCREAMING_SNAKE_CASE = Decoder(
in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , up_block_types=__SCREAMING_SNAKE_CASE , block_out_channels=__SCREAMING_SNAKE_CASE , layers_per_block=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , act_fn=__SCREAMING_SNAKE_CASE , )
        __SCREAMING_SNAKE_CASE = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        __SCREAMING_SNAKE_CASE = nn.Conv2d(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 1 )
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
# only relevant if vae tiling is enabled
__SCREAMING_SNAKE_CASE = self.config.sample_size
__SCREAMING_SNAKE_CASE = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__SCREAMING_SNAKE_CASE = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__SCREAMING_SNAKE_CASE = 0.25
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int=False ) -> List[Any]:
"""simple docstring"""
if isinstance(__SCREAMING_SNAKE_CASE , (Encoder, Decoder) ):
__SCREAMING_SNAKE_CASE = value
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : bool = True ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = use_tiling
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
self.enable_tiling(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = True
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase__ ( self : Dict ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
def fn_recursive_add_processors(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : torch.nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, AttentionProcessor] ):
if hasattr(__SCREAMING_SNAKE_CASE , """set_processor""" ):
__SCREAMING_SNAKE_CASE = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'{name}.{sub_name}' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return processors
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = len(self.attn_processors.keys() )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != count:
raise ValueError(
f'A dict of processors was passed, but the number of processors {len(__SCREAMING_SNAKE_CASE )} does not match the'
f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : torch.nn.Module , __SCREAMING_SNAKE_CASE : Optional[Any] ):
if hasattr(__SCREAMING_SNAKE_CASE , """set_processor""" ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
module.set_processor(__SCREAMING_SNAKE_CASE )
else:
module.set_processor(processor.pop(f'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'{name}.{sub_name}' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for name, module in self.named_children():
fn_recursive_attn_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : bool = True ) -> AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
if self.use_slicing and x.shape[0] > 1:
__SCREAMING_SNAKE_CASE = [self.encoder(__SCREAMING_SNAKE_CASE ) for x_slice in x.split(1 )]
__SCREAMING_SNAKE_CASE = torch.cat(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = self.encoder(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.quant_conv(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = DiagonalGaussianDistribution(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.post_quant_conv(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.decoder(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__SCREAMING_SNAKE_CASE )
@apply_forward_hook
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
__SCREAMING_SNAKE_CASE = [self._decode(__SCREAMING_SNAKE_CASE ).sample for z_slice in z.split(1 )]
__SCREAMING_SNAKE_CASE = torch.cat(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = self._decode(__SCREAMING_SNAKE_CASE ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = min(a.shape[2] , b.shape[2] , __SCREAMING_SNAKE_CASE )
for y in range(__SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = min(a.shape[3] , b.shape[3] , __SCREAMING_SNAKE_CASE )
for x in range(__SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : bool = True ) -> AutoencoderKLOutput:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__SCREAMING_SNAKE_CASE = int(self.tile_latent_min_size * self.tile_overlap_factor )
__SCREAMING_SNAKE_CASE = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__SCREAMING_SNAKE_CASE = []
for i in range(0 , x.shape[2] , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = []
for j in range(0 , x.shape[3] , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__SCREAMING_SNAKE_CASE = self.encoder(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.quant_conv(__SCREAMING_SNAKE_CASE )
row.append(__SCREAMING_SNAKE_CASE )
rows.append(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = []
for i, row in enumerate(__SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = []
for j, tile in enumerate(__SCREAMING_SNAKE_CASE ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__SCREAMING_SNAKE_CASE = self.blend_v(rows[i - 1][j] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if j > 0:
__SCREAMING_SNAKE_CASE = self.blend_h(row[j - 1] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=3 ) )
__SCREAMING_SNAKE_CASE = torch.cat(__SCREAMING_SNAKE_CASE , dim=2 )
__SCREAMING_SNAKE_CASE = DiagonalGaussianDistribution(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__SCREAMING_SNAKE_CASE = int(self.tile_sample_min_size * self.tile_overlap_factor )
__SCREAMING_SNAKE_CASE = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__SCREAMING_SNAKE_CASE = []
for i in range(0 , z.shape[2] , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = []
for j in range(0 , z.shape[3] , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__SCREAMING_SNAKE_CASE = self.post_quant_conv(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.decoder(__SCREAMING_SNAKE_CASE )
row.append(__SCREAMING_SNAKE_CASE )
rows.append(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = []
for i, row in enumerate(__SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = []
for j, tile in enumerate(__SCREAMING_SNAKE_CASE ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__SCREAMING_SNAKE_CASE = self.blend_v(rows[i - 1][j] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if j > 0:
__SCREAMING_SNAKE_CASE = self.blend_h(row[j - 1] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=3 ) )
__SCREAMING_SNAKE_CASE = torch.cat(__SCREAMING_SNAKE_CASE , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = sample
__SCREAMING_SNAKE_CASE = self.encode(__SCREAMING_SNAKE_CASE ).latent_dist
if sample_posterior:
__SCREAMING_SNAKE_CASE = posterior.sample(generator=__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = posterior.mode()
__SCREAMING_SNAKE_CASE = self.decode(__SCREAMING_SNAKE_CASE ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__SCREAMING_SNAKE_CASE )
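# Usage sketch, assuming the class above is diffusers' AutoencoderKL (checkpoint name illustrative):
#   vae = AutoencoderKL.from_pretrained("some/checkpoint", subfolder="vae")
#   vae.enable_tiling()
#   latents = vae.encode(images).latent_dist.sample()
#   reconstruction = vae.decode(latents).sample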
| 354 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = analyze_text(a__ )
__SCREAMING_SNAKE_CASE = list(""" """ + ascii_lowercase )
# what is our total sum of probabilities.
__SCREAMING_SNAKE_CASE = sum(single_char_strings.values() )
# one length string
__SCREAMING_SNAKE_CASE = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
__SCREAMING_SNAKE_CASE = single_char_strings[ch]
__SCREAMING_SNAKE_CASE = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
# print entropy
print(F'{round(-1 * my_fir_sum ):.1f}' )
# two len string
__SCREAMING_SNAKE_CASE = sum(two_char_strings.values() )
__SCREAMING_SNAKE_CASE = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            __SCREAMING_SNAKE_CASE = cha + chb
if sequence in two_char_strings:
__SCREAMING_SNAKE_CASE = two_char_strings[sequence]
                __SCREAMING_SNAKE_CASE = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
# print second entropy
print(F'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Counter() # type: ignore
__SCREAMING_SNAKE_CASE = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(a__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def a__ ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 331 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "marian"
lowerCAmelCase__ = ["past_key_values"]
lowerCAmelCase__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , __SCREAMING_SNAKE_CASE : str=58_101 , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Optional[Any]=1_024 , __SCREAMING_SNAKE_CASE : Optional[Any]=12 , __SCREAMING_SNAKE_CASE : Any=4_096 , __SCREAMING_SNAKE_CASE : str=16 , __SCREAMING_SNAKE_CASE : str=12 , __SCREAMING_SNAKE_CASE : Optional[Any]=4_096 , __SCREAMING_SNAKE_CASE : int=16 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : List[str]="gelu" , __SCREAMING_SNAKE_CASE : int=1_024 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : Optional[int]=0.0 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=58_100 , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : str=58_100 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : Optional[int]=True , **__SCREAMING_SNAKE_CASE : str , ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = decoder_vocab_size or vocab_size
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = decoder_layerdrop
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = scale_embedding # scale factor will be sqrt(d_model) if True
__SCREAMING_SNAKE_CASE = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , forced_eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
class lowerCAmelCase__ ( a ):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def UpperCAmelCase__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE = {0: """batch"""}
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """decoder_sequence"""}
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__SCREAMING_SNAKE_CASE = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers
for i in range(__SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = {0: """batch""", 2: """past_sequence + sequence"""}
__SCREAMING_SNAKE_CASE = {0: """batch""", 2: """past_sequence + sequence"""}
else:
__SCREAMING_SNAKE_CASE = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def UpperCAmelCase__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE = super().outputs
else:
__SCREAMING_SNAKE_CASE = super(__SCREAMING_SNAKE_CASE , self ).outputs
if self.use_past:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers
for i in range(__SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = {0: """batch""", 2: """past_sequence + sequence"""}
__SCREAMING_SNAKE_CASE = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_encoder_and_decoder(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Generate decoder inputs
__SCREAMING_SNAKE_CASE = seq_length if not self.use_past else 1
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_encoder_and_decoder(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__SCREAMING_SNAKE_CASE = dict(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape
__SCREAMING_SNAKE_CASE = common_inputs["""decoder_input_ids"""].shape[1]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_attention_heads
__SCREAMING_SNAKE_CASE = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE = decoder_seq_length + 3
__SCREAMING_SNAKE_CASE = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__SCREAMING_SNAKE_CASE = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] , dim=1 )
__SCREAMING_SNAKE_CASE = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers
__SCREAMING_SNAKE_CASE = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) - min_num_layers
__SCREAMING_SNAKE_CASE = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
torch.zeros(__SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
__SCREAMING_SNAKE_CASE = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) )
return common_inputs
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_encoder_and_decoder(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE = seqlen + 2
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_attention_heads
__SCREAMING_SNAKE_CASE = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE = common_inputs["""attention_mask"""].dtype
__SCREAMING_SNAKE_CASE = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 )
__SCREAMING_SNAKE_CASE = [
(torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(__SCREAMING_SNAKE_CASE )
]
return common_inputs
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE = tokenizer.num_special_tokens_to_add(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = compute_effective_axis_dimension(
__SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
__SCREAMING_SNAKE_CASE = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
__SCREAMING_SNAKE_CASE = dict(tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) )
return common_inputs
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
            __SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_causal_lm(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
return common_inputs
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE = super()._flatten_past_key_values_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = super(__SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase__ ( self : str ) -> float:
"""simple docstring"""
return 1E-4
| 355 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def a__ ( a__ ):
"""simple docstring"""
return x + 2
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """x = 3"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} )
__SCREAMING_SNAKE_CASE = """x = y"""
__SCREAMING_SNAKE_CASE = {"""y""": 5}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 5, """y""": 5} )
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """y = add_two(x)"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """x = 3"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} )
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """x = 3\ny = 5"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """text = f'This is x: {x}.'"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """text""": """This is x: 3."""} )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """if x <= 3:\n y = 2\nelse:\n y = 5"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 2} )
__SCREAMING_SNAKE_CASE = {"""x""": 8}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 8, """y""": 5} )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [3, 5] )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} )
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """y = x"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 3} )
def UpperCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]\ntest_list[1]"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} )
__SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """x = 0\nfor i in range(3):\n x = i"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""range""": range} , state=__SCREAMING_SNAKE_CASE )
assert result == 2
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 2, """i""": 2} )
| 331 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCAmelCase : str = logging.get_logger(__name__)
@add_end_docstrings(a )
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Dict ) -> Tuple:
"""simple docstring"""
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Optional[int]="This is a photo of {}." ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
__SCREAMING_SNAKE_CASE = [hypothesis_template.format(__SCREAMING_SNAKE_CASE ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
__SCREAMING_SNAKE_CASE = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : -x[0] )
]
return result
| 356 |
'''simple docstring'''
import os
def a__ ( a__ = "input.txt" ):
"""simple docstring"""
    with open(os.path.join(os.path.dirname(__file__ ) , a__ ) ) as input_file:
        __SCREAMING_SNAKE_CASE = [
            [int(element ) for element in line.split(""",""" )]
            for line in input_file.readlines()
        ]
    __SCREAMING_SNAKE_CASE = len(matrix )
__SCREAMING_SNAKE_CASE = len(matrix[0] )
    __SCREAMING_SNAKE_CASE = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        __SCREAMING_SNAKE_CASE = matrix[i][0]
    for j in range(1 , cols ):
        for i in range(rows ):
            __SCREAMING_SNAKE_CASE = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1 , rows ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 331 | 0 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
UpperCAmelCase : Any = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
UpperCAmelCase : List[Any] = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
UpperCAmelCase : Any = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def UpperCAmelCase__ ( self : List[str] ) -> Any:
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any="uniform_average" , __SCREAMING_SNAKE_CASE : Any=True ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = mean_squared_error(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE , multioutput=__SCREAMING_SNAKE_CASE , squared=__SCREAMING_SNAKE_CASE )
return {"mse": mse}
| 357 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCAmelCase : Any = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
UpperCAmelCase : Optional[Any] = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCAmelCase : Dict = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCAmelCase : Optional[Any] = sorted(arg_to_scheduler.keys())
UpperCAmelCase : str = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class lowerCAmelCase__ ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : argparse.Namespace , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict="base" , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Any:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = Path(self.hparams.output_dir )
__SCREAMING_SNAKE_CASE = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = config
__SCREAMING_SNAKE_CASE = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(self.hparams , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
assert hasattr(self.config , __SCREAMING_SNAKE_CASE ), f'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __SCREAMING_SNAKE_CASE , getattr(self.hparams , __SCREAMING_SNAKE_CASE ) )
if tokenizer is None:
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = tokenizer
__SCREAMING_SNAKE_CASE = MODEL_MODES[mode]
if model is None:
__SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = model
def UpperCAmelCase__ ( self : List[str] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = arg_to_scheduler[self.hparams.lr_scheduler]
__SCREAMING_SNAKE_CASE = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__SCREAMING_SNAKE_CASE = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
return scheduler
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model
__SCREAMING_SNAKE_CASE = ["""bias""", """LayerNorm.weight"""]
__SCREAMING_SNAKE_CASE = [
{
"""params""": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
"""weight_decay""": self.hparams.weight_decay,
},
{
"""params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
if self.hparams.adafactor:
__SCREAMING_SNAKE_CASE = Adafactor(
__SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=__SCREAMING_SNAKE_CASE , relative_step=__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = AdamW(
__SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__SCREAMING_SNAKE_CASE = optimizer
__SCREAMING_SNAKE_CASE = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
return self.validation_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.validation_end(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__SCREAMING_SNAKE_CASE = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
"""simple docstring"""
if stage == "test":
__SCREAMING_SNAKE_CASE = len(self.test_dataloader().dataset )
else:
__SCREAMING_SNAKE_CASE = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(self.train_dataloader().dataset )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> int:
"""simple docstring"""
raise NotImplementedError("""You must implement this for your task""" )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.train_loader
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , """cached_{}_{}_{}""".format(
__SCREAMING_SNAKE_CASE , list(filter(__SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict[str, Any] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.output_dir.joinpath("""best_tfmr""" )
__SCREAMING_SNAKE_CASE = self.step_count
self.model.save_pretrained(__SCREAMING_SNAKE_CASE )
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
@staticmethod
def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--config_name""" , default="""""" , type=__SCREAMING_SNAKE_CASE , help="""Pretrained config name or path if not the same as model_name""" )
parser.add_argument(
"""--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument(
"""--cache_dir""" , default=str(Path(__SCREAMING_SNAKE_CASE ).parent / """test_run""" / """cache""" ) , type=__SCREAMING_SNAKE_CASE , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
parser.add_argument(
"""--encoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--decoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--attention_dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Attention dropout probability (Optional). Goes into model.config""" , )
parser.add_argument("""--learning_rate""" , default=5E-5 , type=__SCREAMING_SNAKE_CASE , help="""The initial learning rate for Adam.""" )
parser.add_argument(
"""--lr_scheduler""" , default="""linear""" , choices=__SCREAMING_SNAKE_CASE , metavar=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Learning rate scheduler""" , )
parser.add_argument("""--weight_decay""" , default=0.0 , type=__SCREAMING_SNAKE_CASE , help="""Weight decay if we apply some.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--warmup_steps""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--num_workers""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""kwarg passed to DataLoader""" )
parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--train_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--eval_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--adafactor""" , action="""store_true""" )
class lowerCAmelCase__ ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY. In newer pytorch-lightning releases, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowerCAmelCase__ ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = trainer.lr_schedulers[0]["""scheduler"""]
__SCREAMING_SNAKE_CASE = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> List[Any]:
"""simple docstring"""
rank_zero_info("""***** Validation results *****""" )
__SCREAMING_SNAKE_CASE = trainer.callback_metrics
# Log results
for key in sorted(__SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> str:
"""simple docstring"""
rank_zero_info("""***** Test results *****""" )
__SCREAMING_SNAKE_CASE = trainer.callback_metrics
# Log and save results to file
__SCREAMING_SNAKE_CASE = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
with open(__SCREAMING_SNAKE_CASE , """w""" ) as writer:
for key in sorted(__SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
writer.write("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def a__ ( a__ , a__ ):
"""simple docstring"""
parser.add_argument(
"""--output_dir""" , default=str(Path(a__ ).parent / """test_run""" / """model_checkpoints""" ) , type=a__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=a__ , default="""O2""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=a__ )
parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=a__ , help="""Max gradient norm""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
parser.add_argument(
"""--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=a__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--seed""" , type=a__ , default=42 , help="""random seed for initialization""" )
parser.add_argument(
"""--data_dir""" , default=str(Path(a__ ).parent / """test_run""" / """dummy-train-data""" ) , type=a__ , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def a__ ( a__ , a__ , a__=None , a__=True , a__=[] , a__=None , a__=None , **a__ , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__SCREAMING_SNAKE_CASE = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=a__ )
# add custom checkpoints
if checkpoint_callback is None:
__SCREAMING_SNAKE_CASE = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(a__ )
if logging_callback is None:
__SCREAMING_SNAKE_CASE = LoggingCallback()
__SCREAMING_SNAKE_CASE = {}
if args.fpaa:
__SCREAMING_SNAKE_CASE = 16
if args.gpus > 1:
__SCREAMING_SNAKE_CASE = """auto"""
__SCREAMING_SNAKE_CASE = """ddp"""
__SCREAMING_SNAKE_CASE = args.accumulate_grad_batches
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = """auto"""
__SCREAMING_SNAKE_CASE = pl.Trainer.from_argparse_args(
a__ , weights_summary=a__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=a__ , val_check_interval=1 , num_sanity_val_steps=2 , **a__ , )
if args.do_train:
trainer.fit(a__ )
else:
print("""RAG modeling tests with new set functions successfuly executed!""" )
return trainer
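# End-to-end usage sketch for the helpers above. This is a hypothetical driver,
# not part of the module: `add_generic_args`, `generic_train`, and the
# `add_model_specific_args` hook are assumed de-obfuscated names for the
# functions defined here, and `MyTaskModule` stands in for a concrete subclass
# that implements the dataloader hooks.
# parser = argparse.ArgumentParser()
# add_generic_args(parser, os.getcwd())
# parser = MyTaskModule.add_model_specific_args(parser, os.getcwd())
# args = parser.parse_args()
# model = MyTaskModule(args)
# trainer = generic_train(model, args)
# if args.do_predict:
#     trainer.test(model)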
| 331 | 0 |
UpperCAmelCase : Optional[int] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCAmelCase : Dict = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCAmelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 358 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = (DDPMScheduler,)
def UpperCAmelCase__ ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__SCREAMING_SNAKE_CASE = pred_prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="""v_prediction""" )
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__SCREAMING_SNAKE_CASE = pred_prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
if i == len(__SCREAMING_SNAKE_CASE ) - 1:
__SCREAMING_SNAKE_CASE = -1
else:
__SCREAMING_SNAKE_CASE = timesteps[i + 1]
__SCREAMING_SNAKE_CASE = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__SCREAMING_SNAKE_CASE , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
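# Standalone numeric cross-check of the "fixed_small" variances asserted above.
# A minimal sketch, assuming the scheduler's linear schedule is a plain linspace
# over [beta_start, beta_end]; it reproduces beta~_t = (1 - abar_{t-1}) / (1 - abar_t) * beta_t.
import torch

betas = torch.linspace(0.0001, 0.02, 1_000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def fixed_small_variance(t: int) -> float:
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return ((1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * betas[t]).item()

assert abs(fixed_small_variance(487) - 0.00979) < 1e-5  # same values the test checks
assert abs(fixed_small_variance(999) - 0.02) < 1e-5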
| 331 | 0 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = (DDIMParallelScheduler,)
lowerCAmelCase__ = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase__ ( self : int , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def UpperCAmelCase__ ( self : List[Any] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 10, 0.0
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for t in scheduler.timesteps:
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
return sample
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(steps_offset=1 )
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Tuple:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 10, 0.0
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter + 0.1
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter - 0.1
__SCREAMING_SNAKE_CASE = samplea.shape[0]
__SCREAMING_SNAKE_CASE = torch.stack([samplea, samplea, samplea] , dim=0 )
__SCREAMING_SNAKE_CASE = torch.arange(__SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__SCREAMING_SNAKE_CASE = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.full_loop()
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.223967 ) < 1E-3
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.full_loop(prediction_type="""v_prediction""" )
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def UpperCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.full_loop(set_alpha_to_one=__SCREAMING_SNAKE_CASE , beta_start=0.01 )
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def UpperCAmelCase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.full_loop(set_alpha_to_one=__SCREAMING_SNAKE_CASE , beta_start=0.01 )
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
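# Standalone cross-check of the DDIM posterior variance asserted above. A minimal
# sketch, assuming the same linear linspace schedule; it reproduces
# sigma_t^2 = (1 - abar_prev) / (1 - abar_t) * (1 - abar_t / abar_prev).
import torch

betas = torch.linspace(0.0001, 0.02, 1_000)
abar = torch.cumprod(1.0 - betas, dim=0)

def ddim_variance(t: int, prev_t: int) -> float:
    return ((1 - abar[prev_t]) / (1 - abar[t]) * (1 - abar[t] / abar[prev_t])).item()

assert abs(ddim_variance(420, 400) - 0.14771) < 1e-5  # same values the test checks
assert abs(ddim_variance(487, 486) - 0.00979) < 1e-5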
| 359 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase : Dict = TypeVar('T')
def a__ ( a__ ):
"""simple docstring"""
return (position - 1) // 2
def a__ ( a__ ):
"""simple docstring"""
return (2 * position) + 1
def a__ ( a__ ):
"""simple docstring"""
return (2 * position) + 2
class lowerCAmelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[str] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = 0
def __len__( self : Optional[Any] ) -> int:
"""simple docstring"""
return self.elements
def __repr__( self : List[str] ) -> str:
"""simple docstring"""
return str(self.heap )
def UpperCAmelCase__ ( self : Tuple ) -> bool:
"""simple docstring"""
return self.elements == 0
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
self.heap.append((elem, weight) )
__SCREAMING_SNAKE_CASE = self.elements
self.elements += 1
self._bubble_up(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> T:
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[0]
self._bubble_down(__SCREAMING_SNAKE_CASE )
return elem
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.position_map[elem]
__SCREAMING_SNAKE_CASE = (elem, weight)
if position > 0:
__SCREAMING_SNAKE_CASE = get_parent_position(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : T ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.position_map[elem]
if curr_pos == 0:
return None
__SCREAMING_SNAKE_CASE = get_parent_position(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[curr_pos]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_up(__SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.position_map[elem]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[curr_pos]
__SCREAMING_SNAKE_CASE = get_child_left_position(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = get_child_right_position(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements and child_right_position < self.elements:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_left_position]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
return None
if child_right_position < self.elements:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.heap[nodea_pos][0]
__SCREAMING_SNAKE_CASE = self.heap[nodea_pos][0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__SCREAMING_SNAKE_CASE = nodea_pos
__SCREAMING_SNAKE_CASE = nodea_pos
class lowerCAmelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = 0
def __repr__( self : Dict ) -> str:
"""simple docstring"""
return str(self.connections )
def __len__( self : Dict ) -> int:
"""simple docstring"""
return self.nodes
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None:
"""simple docstring"""
if node not in self.connections:
__SCREAMING_SNAKE_CASE = {}
self.nodes += 1
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
self.add_node(__SCREAMING_SNAKE_CASE )
self.add_node(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = weight
__SCREAMING_SNAKE_CASE = weight
def a__ ( a__ , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {node: maxsize for node in graph.connections}
__SCREAMING_SNAKE_CASE = {node: None for node in graph.connections}
__SCREAMING_SNAKE_CASE = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(a__ , a__ )
if priority_queue.is_empty():
return dist, parent
# initialization
__SCREAMING_SNAKE_CASE = priority_queue.extract_min()
__SCREAMING_SNAKE_CASE = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(a__ , dist[neighbour] )
__SCREAMING_SNAKE_CASE = node
# running prim's algorithm
while not priority_queue.is_empty():
__SCREAMING_SNAKE_CASE = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(a__ , dist[neighbour] )
__SCREAMING_SNAKE_CASE = node
return dist, parent
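# For comparison, a compact self-contained sketch of the same algorithm using the
# standard-library heapq with lazy deletion, instead of the decrease-key priority
# queue implemented above (function and variable names here are illustrative):
import heapq

def prim_mst_parents(adj: dict) -> dict:
    """adj: {node: {neighbour: weight}} for an undirected graph; returns {node: parent}."""
    start = next(iter(adj))
    visited: set = set()
    parent: dict = {}
    heap = [(0, start, None)]
    while heap:
        _, node, par = heapq.heappop(heap)
        if node in visited:
            continue  # stale entry left behind instead of a decrease-key
        visited.add(node)
        parent[node] = par
        for neighbour, w in adj[node].items():
            if neighbour not in visited:
                heapq.heappush(heap, (w, neighbour, node))
    return parent

# Example: a triangle {a, b, c} plus a pendant vertex d.
example = {"a": {"b": 3, "c": 1}, "b": {"a": 3, "c": 2}, "c": {"a": 1, "b": 2, "d": 4}, "d": {"c": 4}}
assert prim_mst_parents(example) == {"a": None, "c": "a", "b": "c", "d": "c"}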
| 331 | 0 |
'''simple docstring'''
def _snake_case ( a__ ):
"""simple docstring"""
if edge <= 0 or not isinstance(a__ , a__ ):
raise ValueError("""Length must be a positive.""" )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def _snake_case ( a__ ):
"""simple docstring"""
if edge <= 0 or not isinstance(a__ , a__ ):
raise ValueError("""Length must be a positive.""" )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
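# Worked check of the two closed forms above. A self-contained sketch (needed
# because the obfuscation reuses one function name for both definitions); the
# reference values are the standard unit-edge dodecahedron constants.
import math

edge = 1.0
surface = 3 * math.sqrt(25 + 10 * math.sqrt(5)) * edge**2   # ~20.6457
volume = (15 + 7 * math.sqrt(5)) / 4 * edge**3              # ~7.6631
assert math.isclose(surface, 20.6457288, rel_tol=1e-6)
assert math.isclose(volume, 7.6631190, rel_tol=1e-6)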
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
if a == 0:
raise ValueError("""Coefficient 'a' must not be zero.""" )
__SCREAMING_SNAKE_CASE = b * b - 4 * a * c
__SCREAMING_SNAKE_CASE = (-b + sqrt(a__ )) / (2 * a)
__SCREAMING_SNAKE_CASE = (-b - sqrt(a__ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = quadratic_roots(a=5 , b=6 , c=1 )
print(F'The solutions are: {solutiona} and {solutiona}' )
if __name__ == "__main__":
main()
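# Worked example of the closed form above, kept self-contained since the public
# name `quadratic_roots` used in main() is obfuscated in the definition itself.
from cmath import sqrt as csqrt

a, b, c = 5, 6, 1
delta = b * b - 4 * a * c                      # 16 -> two real roots (-6 +/- 4) / 10
roots = ((-b + csqrt(delta)) / (2 * a), (-b - csqrt(delta)) / (2 * a))
assert (roots[0].real, roots[1].real) == (-0.2, -1.0)
# A negative discriminant gives a conjugate complex pair: x^2 + 1 = 0 -> +/-1j.
assert ((0 + csqrt(-4)) / 2, (0 - csqrt(-4)) / 2) == (1j, -1j)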
| 331 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCAmelCase : Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCAmelCase : List[Any] = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
__SCREAMING_SNAKE_CASE = self.transformer_dir
shutil.copy(
os.path.join(__SCREAMING_SNAKE_CASE , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
__SCREAMING_SNAKE_CASE = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
__SCREAMING_SNAKE_CASE = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
__SCREAMING_SNAKE_CASE = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = os.path.join(self.transformer_dir , """new_code.py""" )
with open(__SCREAMING_SNAKE_CASE , """w""" , newline="""\n""" ) as f:
f.write(__SCREAMING_SNAKE_CASE )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , """r""" ) as f:
self.assertEqual(f.read() , __SCREAMING_SNAKE_CASE )  # compare the overwritten file to the expected code
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , __SCREAMING_SNAKE_CASE , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with a really long name
__SCREAMING_SNAKE_CASE = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , f'{long_class_name}LMPredictionHead' , re.sub("""Bert""" , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , __SCREAMING_SNAKE_CASE , overwrite_result=re.sub("""Bert""" , """TestModel""" , __SCREAMING_SNAKE_CASE ) , )
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
__SCREAMING_SNAKE_CASE = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
__SCREAMING_SNAKE_CASE = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__SCREAMING_SNAKE_CASE = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme["""format_model_list"""] )
self.assertFalse(__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
__SCREAMING_SNAKE_CASE = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__SCREAMING_SNAKE_CASE = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = check_copies.convert_to_localized_md(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 361 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCAmelCase : Optional[int] = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "retribert"
def __init__( self : int , __SCREAMING_SNAKE_CASE : str=30_522 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Any=8 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : List[str]=3_072 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=1E-12 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=128 , __SCREAMING_SNAKE_CASE : Tuple=0 , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = share_encoders
__SCREAMING_SNAKE_CASE = projection_dim
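# Minimal instantiation sketch (assumptions: the de-obfuscated class name is
# RetriBertConfig, matching model_type "retribert", and it is importable from
# transformers releases that still ship the deprecated RetriBERT code):
# from transformers import RetriBertConfig
# config = RetriBertConfig(projection_dim=256, share_encoders=False)
# assert config.projection_dim == 256 and not config.share_encoders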
| 331 | 0 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def a__ ( a__ ):
"""simple docstring"""
return x + 2
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """x = 3"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} )
__SCREAMING_SNAKE_CASE = """x = y"""
__SCREAMING_SNAKE_CASE = {"""y""": 5}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 5, """y""": 5} )
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """y = add_two(x)"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """x = 3"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} )
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """x = 3\ny = 5"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """text = f'This is x: {x}.'"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """text""": """This is x: 3."""} )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """if x <= 3:\n y = 2\nelse:\n y = 5"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 2} )
__SCREAMING_SNAKE_CASE = {"""x""": 8}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 8, """y""": 5} )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [3, 5] )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} )
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """y = x"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 3} )
def UpperCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]\ntest_list[1]"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} )
__SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """x = 0\nfor i in range(3):\n x = i"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""range""": range} , state=__SCREAMING_SNAKE_CASE )
assert result == 2
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 2, """i""": 2} )
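# Standalone usage sketch combining the constructs exercised above into one
# program. Assumption: the interpreter also evaluates simple binary operations
# (the tests route arithmetic through `add_two` instead):
# state = {}
# result = evaluate("x = 0\nfor i in range(3):\n    x = x + i", {"range": range}, state=state)
# assert result == 3 and state == {"x": 3, "i": 2}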
| 362 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( a , a , a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = AltDiffusionPipeline
lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
__SCREAMING_SNAKE_CASE = 77
__SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[str]:
"""simple docstring"""
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = text_encoder
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A photo of an astronaut"""
__SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = text_encoder
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""numpy""" )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
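# Minimal inference sketch mirroring the slow test above. Assumptions: network
# access to the BAAI/AltDiffusion weights and a CUDA device are available.
# import torch
# from diffusers import AltDiffusionPipeline
# pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None).to("cuda")
# image = pipe("A painting of a squirrel eating a burger", generator=torch.manual_seed(0),
#              guidance_scale=6.0, num_inference_steps=20).images[0]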
| 331 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "gpt_bigcode"
lowerCAmelCase__ = ["past_key_values"]
lowerCAmelCase__ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str=50_257 , __SCREAMING_SNAKE_CASE : Any=1_024 , __SCREAMING_SNAKE_CASE : Optional[int]=768 , __SCREAMING_SNAKE_CASE : Union[str, Any]=12 , __SCREAMING_SNAKE_CASE : Dict=12 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : str="gelu_pytorch_tanh" , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : List[str]=1E-5 , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=50_256 , __SCREAMING_SNAKE_CASE : Union[str, Any]=50_256 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = n_positions
__SCREAMING_SNAKE_CASE = n_embd
__SCREAMING_SNAKE_CASE = n_layer
__SCREAMING_SNAKE_CASE = n_head
__SCREAMING_SNAKE_CASE = n_inner
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = resid_pdrop
__SCREAMING_SNAKE_CASE = embd_pdrop
__SCREAMING_SNAKE_CASE = attn_pdrop
__SCREAMING_SNAKE_CASE = layer_norm_epsilon
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = scale_attn_weights
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = attention_softmax_in_fpaa
__SCREAMING_SNAKE_CASE = scale_attention_softmax_in_fpaa
__SCREAMING_SNAKE_CASE = multi_query
__SCREAMING_SNAKE_CASE = bos_token_id
__SCREAMING_SNAKE_CASE = eos_token_id
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
| 363 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
UpperCAmelCase : Optional[int] = 'examples/'
UpperCAmelCase : List[str] = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
UpperCAmelCase : Union[str, Any] = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
UpperCAmelCase : Tuple = 'README.md'
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern]
__SCREAMING_SNAKE_CASE = replace.replace("""VERSION""" , a__ )
__SCREAMING_SNAKE_CASE = re_pattern.sub(a__ , a__ )
with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(a__ )
def a__ ( a__ ):
"""simple docstring"""
for folder, directories, fnames in os.walk(a__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(a__ , a__ ) , a__ , pattern="""examples""" )
def a__ ( a__ , a__=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(a__ , a__ , a__ )
if not patch:
update_version_in_examples(a__ )
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """🤗 Transformers currently provides the following architectures"""
__SCREAMING_SNAKE_CASE = """1. Want to contribute a new model?"""
with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
# Find the start of the list.
__SCREAMING_SNAKE_CASE = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__SCREAMING_SNAKE_CASE = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__SCREAMING_SNAKE_CASE = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(a__ )
def a__ ( ):
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE = REPLACE_PATTERNS["""init"""][0].search(a__ ).groups()[0]
return packaging.version.parse(a__ )
def a__ ( a__=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__SCREAMING_SNAKE_CASE = default_version.base_version
elif patch:
__SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
__SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
__SCREAMING_SNAKE_CASE = input(F'Which version are you releasing? [{default_version}]' )
if len(a__ ) == 0:
__SCREAMING_SNAKE_CASE = default_version
print(F'Updating version to {version}.' )
global_version_update(a__ , patch=a__ )
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_version()
__SCREAMING_SNAKE_CASE = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
__SCREAMING_SNAKE_CASE = current_version.base_version
# Check with the user we got that right.
__SCREAMING_SNAKE_CASE = input(F'Which version are we developing now? [{dev_version}]' )
if len(a__ ) == 0:
__SCREAMING_SNAKE_CASE = dev_version
print(F'Updating version to {version}.' )
global_version_update(a__ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCAmelCase : Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 331 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(a )
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str ) -> int:
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
requires_backends(self , """vision""" )
self.check_model_type(__SCREAMING_SNAKE_CASE )
def __call__( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str , **__SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
"""simple docstring"""
return {}, {}, {}
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = image.size
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model(**__SCREAMING_SNAKE_CASE )
return model_outputs
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = model_outputs.predicted_depth
__SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = prediction.squeeze().cpu().numpy()
__SCREAMING_SNAKE_CASE = (output * 255 / np.max(__SCREAMING_SNAKE_CASE )).astype("""uint8""" )
__SCREAMING_SNAKE_CASE = Image.fromarray(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = predicted_depth
__SCREAMING_SNAKE_CASE = depth
return output_dict
| 364 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[str]=8 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=99 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Optional[int]=5 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=36 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Any=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : int=None , ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = 300
return config
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = MraModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = MraForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = MraForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = MraForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = ()
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = MraModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""MRA does not output attentions""" )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
return
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
__SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
__SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = 50_265
__SCREAMING_SNAKE_CASE = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
__SCREAMING_SNAKE_CASE = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = 50_265
__SCREAMING_SNAKE_CASE = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 331 | 0 |
'''simple docstring'''
import os
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.path.dirname(os.path.realpath(a__ ) )
__SCREAMING_SNAKE_CASE = os.path.join(a__ , """triangle.txt""" )
with open(a__ ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
__SCREAMING_SNAKE_CASE = []
for line in triangle:
__SCREAMING_SNAKE_CASE = []
for number in line.strip().split(""" """ ):
numbers_from_line.append(int(a__ ) )
a.append(a__ )
for i in range(1 , len(a__ ) ):
for j in range(len(a[i] ) ):
__SCREAMING_SNAKE_CASE = a[i - 1][j] if j != len(a[i - 1] ) else 0
__SCREAMING_SNAKE_CASE = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(a__ , a__ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 365 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
lowerCAmelCase__ = 10000
lowerCAmelCase__ = None
lowerCAmelCase__ = None
class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCAmelCase__ = ParquetConfig
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
__SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ):
__SCREAMING_SNAKE_CASE = data_files
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
__SCREAMING_SNAKE_CASE = []
for split_name, files in data_files.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(__SCREAMING_SNAKE_CASE ) )
break
splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={"""files""": files} ) )
return splits
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__SCREAMING_SNAKE_CASE = table_cast(__SCREAMING_SNAKE_CASE , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ):
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = pq.ParquetFile(__SCREAMING_SNAKE_CASE )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__SCREAMING_SNAKE_CASE = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'{file_idx}_{batch_idx}', self._cast_table(__SCREAMING_SNAKE_CASE )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}' )
raise
| 331 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCAmelCase__ ( a , a , a ):
"""simple docstring"""
lowerCAmelCase__ = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 50_257 , __SCREAMING_SNAKE_CASE : int = 1_024 , __SCREAMING_SNAKE_CASE : int = 768 , __SCREAMING_SNAKE_CASE : int = 12 , __SCREAMING_SNAKE_CASE : int = 12 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : str = "gelu_new" , __SCREAMING_SNAKE_CASE : float = 0.1 , __SCREAMING_SNAKE_CASE : float = 0.1 , __SCREAMING_SNAKE_CASE : float = 0.1 , __SCREAMING_SNAKE_CASE : float = 1E-5 , __SCREAMING_SNAKE_CASE : float = 0.02 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__SCREAMING_SNAKE_CASE = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
__SCREAMING_SNAKE_CASE = prefix_inner_dim
__SCREAMING_SNAKE_CASE = prefix_hidden_dim
__SCREAMING_SNAKE_CASE = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
__SCREAMING_SNAKE_CASE = (
nn.Linear(self.prefix_hidden_dim , __SCREAMING_SNAKE_CASE ) if self.prefix_hidden_dim is not None else nn.Identity()
)
__SCREAMING_SNAKE_CASE = GPTaConfig(
vocab_size=__SCREAMING_SNAKE_CASE , n_positions=__SCREAMING_SNAKE_CASE , n_embd=__SCREAMING_SNAKE_CASE , n_layer=__SCREAMING_SNAKE_CASE , n_head=__SCREAMING_SNAKE_CASE , n_inner=__SCREAMING_SNAKE_CASE , activation_function=__SCREAMING_SNAKE_CASE , resid_pdrop=__SCREAMING_SNAKE_CASE , embd_pdrop=__SCREAMING_SNAKE_CASE , attn_pdrop=__SCREAMING_SNAKE_CASE , layer_norm_epsilon=__SCREAMING_SNAKE_CASE , initializer_range=__SCREAMING_SNAKE_CASE , scale_attn_weights=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE , scale_attn_by_inverse_layer_idx=__SCREAMING_SNAKE_CASE , reorder_and_upcast_attn=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = GPTaLMHeadModel(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : torch.Tensor , __SCREAMING_SNAKE_CASE : torch.Tensor , __SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , __SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.transformer.transformer.wte(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.encode_prefix(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.decode_prefix(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
__SCREAMING_SNAKE_CASE = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
__SCREAMING_SNAKE_CASE = torch.cat((dummy_token, input_ids) , dim=1 )
__SCREAMING_SNAKE_CASE = self.transformer(inputs_embeds=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : torch.device ) -> torch.Tensor:
"""simple docstring"""
return torch.zeros(__SCREAMING_SNAKE_CASE , self.prefix_length , dtype=torch.int64 , device=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Dict ) -> Dict:
"""simple docstring"""
return self.encode_prefix(__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = torch.split(__SCREAMING_SNAKE_CASE , 1 , dim=0 )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for feature in features:
__SCREAMING_SNAKE_CASE = self.decode_prefix(feature.to(__SCREAMING_SNAKE_CASE ) ) # back to the clip feature
# Only support beam search for now
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.generate_beam(
input_embeds=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
__SCREAMING_SNAKE_CASE = torch.stack(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.stack(__SCREAMING_SNAKE_CASE )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : int = 5 , __SCREAMING_SNAKE_CASE : int = 67 , __SCREAMING_SNAKE_CASE : float = 1.0 , __SCREAMING_SNAKE_CASE : Optional[int] = None , ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = torch.ones(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=torch.int )
__SCREAMING_SNAKE_CASE = torch.zeros(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE , dtype=torch.bool )
if input_embeds is not None:
__SCREAMING_SNAKE_CASE = input_embeds
else:
__SCREAMING_SNAKE_CASE = self.transformer.transformer.wte(__SCREAMING_SNAKE_CASE )
for i in range(__SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = self.transformer(inputs_embeds=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = outputs.logits
__SCREAMING_SNAKE_CASE = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
__SCREAMING_SNAKE_CASE = logits.softmax(-1 ).log()
if scores is None:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = logits.topk(__SCREAMING_SNAKE_CASE , -1 )
__SCREAMING_SNAKE_CASE = generated.expand(__SCREAMING_SNAKE_CASE , *generated.shape[1:] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
__SCREAMING_SNAKE_CASE = next_tokens
else:
__SCREAMING_SNAKE_CASE = tokens.expand(__SCREAMING_SNAKE_CASE , *tokens.shape[1:] )
__SCREAMING_SNAKE_CASE = torch.cat((tokens, next_tokens) , dim=1 )
else:
__SCREAMING_SNAKE_CASE = -float(np.inf )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
__SCREAMING_SNAKE_CASE = scores_sum / seq_lengths[:, None]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = scores_sum_average.view(-1 ).topk(__SCREAMING_SNAKE_CASE , -1 )
__SCREAMING_SNAKE_CASE = next_tokens // scores_sum.shape[1]
__SCREAMING_SNAKE_CASE = seq_lengths[next_tokens_source]
__SCREAMING_SNAKE_CASE = next_tokens % scores_sum.shape[1]
__SCREAMING_SNAKE_CASE = next_tokens.unsqueeze(1 )
__SCREAMING_SNAKE_CASE = tokens[next_tokens_source]
__SCREAMING_SNAKE_CASE = torch.cat((tokens, next_tokens) , dim=1 )
__SCREAMING_SNAKE_CASE = generated[next_tokens_source]
__SCREAMING_SNAKE_CASE = scores_sum_average * seq_lengths
__SCREAMING_SNAKE_CASE = is_stopped[next_tokens_source]
__SCREAMING_SNAKE_CASE = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
__SCREAMING_SNAKE_CASE = torch.cat((generated, next_token_embed) , dim=1 )
__SCREAMING_SNAKE_CASE = is_stopped + next_tokens.eq(__SCREAMING_SNAKE_CASE ).squeeze()
if is_stopped.all():
break
__SCREAMING_SNAKE_CASE = scores / seq_lengths
__SCREAMING_SNAKE_CASE = scores.argsort(descending=__SCREAMING_SNAKE_CASE )
# tokens tensors are already padded to max_seq_length
__SCREAMING_SNAKE_CASE = [tokens[i] for i in order]
__SCREAMING_SNAKE_CASE = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 )
__SCREAMING_SNAKE_CASE = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 366 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
UpperCAmelCase : Any = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
UpperCAmelCase : Optional[Any] = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
UpperCAmelCase : Optional[int] = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase : List[str] = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase : List[Any] = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def a__ ( a__ , a__ ):
"""simple docstring"""
for tf_name, hf_name in patterns:
__SCREAMING_SNAKE_CASE = k.replace(a__ , a__ )
return k
def a__ ( a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = BigBirdPegasusConfig(**a__ )
__SCREAMING_SNAKE_CASE = BigBirdPegasusForConditionalGeneration(a__ )
__SCREAMING_SNAKE_CASE = torch_model.state_dict()
__SCREAMING_SNAKE_CASE = {}
# separating decoder weights
__SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
__SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
__SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE]
if any(a__ ):
continue
__SCREAMING_SNAKE_CASE = DECODER_PATTERNS
__SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ )
if new_k not in state_dict:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
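# TensorFlow stores dense/attention kernels transposed relative to PyTorch nn.Linear weights, so transpose before copying.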
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
__SCREAMING_SNAKE_CASE = v.T
__SCREAMING_SNAKE_CASE = torch.from_numpy(a__ )
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
__SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE]
if any(a__ ):
continue
__SCREAMING_SNAKE_CASE = REMAINING_PATTERNS
__SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
__SCREAMING_SNAKE_CASE = v.T
__SCREAMING_SNAKE_CASE = torch.from_numpy(a__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
__SCREAMING_SNAKE_CASE = mapping["""model.embed_positions.weight"""]
__SCREAMING_SNAKE_CASE = mapping.pop("""model.embed_positions.weight""" )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch_model.load_state_dict(a__ , strict=a__ )
__SCREAMING_SNAKE_CASE = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tf.train.list_variables(a__ )
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = ["""global_step"""]
for name, shape in tqdm(a__ , desc="""converting tf checkpoint to dict""" ):
__SCREAMING_SNAKE_CASE = any(pat in name for pat in ignore_name )
if skip_key:
continue
__SCREAMING_SNAKE_CASE = tf.train.load_variable(a__ , a__ )
__SCREAMING_SNAKE_CASE = array
return tf_weights
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_tf_weights_as_numpy(a__ )
__SCREAMING_SNAKE_CASE = convert_bigbird_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
if __name__ == "__main__":
UpperCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase : int = parser.parse_args()
UpperCAmelCase : Dict = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 331 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
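# Standard lazy-import scaffold: tokenizer and model classes are only exposed when their optional backends (sentencepiece, tokenizers, torch) are installed.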
UpperCAmelCase : Tuple = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 367 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(a )
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Any=None ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = {}
if prompt is not None:
__SCREAMING_SNAKE_CASE = prompt
if generate_kwargs is not None:
__SCREAMING_SNAKE_CASE = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__SCREAMING_SNAKE_CASE = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
__SCREAMING_SNAKE_CASE = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE )
if prompt is not None:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise ValueError(
f'Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE )} - but expected a single string. '
"""Note also that one single text can be provided for conditional image to text generation.""" )
__SCREAMING_SNAKE_CASE = self.model.config.model_type
if model_type == "git":
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids
__SCREAMING_SNAKE_CASE = [self.tokenizer.cls_token_id] + input_ids
__SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(__SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'Model type {model_type} does not support conditional text generation' )
else:
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__SCREAMING_SNAKE_CASE = None
return model_inputs
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> List[str]:
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , __SCREAMING_SNAKE_CASE )
and all(x is None for x in model_inputs["""input_ids"""] )
):
__SCREAMING_SNAKE_CASE = None
if generate_kwargs is None:
__SCREAMING_SNAKE_CASE = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__SCREAMING_SNAKE_CASE = model_inputs.pop(self.model.main_input_name )
__SCREAMING_SNAKE_CASE = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return model_outputs
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
for output_ids in model_outputs:
__SCREAMING_SNAKE_CASE = {
"""generated_text""": self.tokenizer.decode(
__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , )
}
records.append(__SCREAMING_SNAKE_CASE )
return records
| 331 | 0 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Dict = '▁'
UpperCAmelCase : Union[str, Any] = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}
UpperCAmelCase : List[Any] = {
'sentencepiece_model_file': 'sentencepiece.bpe.model',
'vocab_file': 'vocab.txt',
}
UpperCAmelCase : List[str] = {
'vocab_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
},
'sentencepiece_model_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
},
}
UpperCAmelCase : Optional[Any] = {
'ernie-m-base': 5_1_4,
'ernie-m-large': 5_1_4,
}
UpperCAmelCase : List[str] = {
'ernie-m-base': {'do_lower_case': False},
'ernie-m-large': {'do_lower_case': False},
}
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = ["input_ids"]
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = RESOURCE_FILES_NAMES
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : int="utf8" , __SCREAMING_SNAKE_CASE : Optional[Any]="[UNK]" , __SCREAMING_SNAKE_CASE : Any="[SEP]" , __SCREAMING_SNAKE_CASE : Tuple="[PAD]" , __SCREAMING_SNAKE_CASE : str="[CLS]" , __SCREAMING_SNAKE_CASE : Any="[MASK]" , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , vocab_file=__SCREAMING_SNAKE_CASE , encoding=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = do_lower_case
__SCREAMING_SNAKE_CASE = sentencepiece_model_ckpt
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# mimic the behavior of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
if vocab_file is not None:
__SCREAMING_SNAKE_CASE = self.load_vocab(filepath=__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = {self.sp_model.id_to_piece(__SCREAMING_SNAKE_CASE ): id for id in range(self.sp_model.get_piece_size() )}
__SCREAMING_SNAKE_CASE = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if text is None:
return None
__SCREAMING_SNAKE_CASE = self.tokenize(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = """""", []
for i, ch in enumerate(__SCREAMING_SNAKE_CASE ):
if ch in self.SP_CHAR_MAPPING:
__SCREAMING_SNAKE_CASE = self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = unicodedata.normalize("""NFKC""" , __SCREAMING_SNAKE_CASE )
if self.is_whitespace(__SCREAMING_SNAKE_CASE ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = normalized_text, [], 0
if self.do_lower_case:
__SCREAMING_SNAKE_CASE = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__SCREAMING_SNAKE_CASE = token[1:]
__SCREAMING_SNAKE_CASE = text[offset:].index(__SCREAMING_SNAKE_CASE ) + offset
__SCREAMING_SNAKE_CASE = start + len(__SCREAMING_SNAKE_CASE )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__SCREAMING_SNAKE_CASE = end
return token_mapping
@property
def UpperCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
return len(self.vocab )
def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Any ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.__dict__.copy()
__SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : int , __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for c in text) )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : List[str]=64 , __SCREAMING_SNAKE_CASE : int=0.1 ) -> Union[str, Any]:
"""simple docstring"""
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
__SCREAMING_SNAKE_CASE = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
__SCREAMING_SNAKE_CASE = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
__SCREAMING_SNAKE_CASE = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
__SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = self.sp_model.SampleEncodeAsPieces(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = []
for pi, piece in enumerate(__SCREAMING_SNAKE_CASE ):
if piece == SPIECE_UNDERLINE:
            if pi != 0 and pi + 1 < len(pieces ) and not pieces[pi + 1].startswith(__SCREAMING_SNAKE_CASE ):
new_pieces.append(__SCREAMING_SNAKE_CASE )
continue
else:
continue
__SCREAMING_SNAKE_CASE = 0
for i, chunk in enumerate(__SCREAMING_SNAKE_CASE ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__SCREAMING_SNAKE_CASE ) or self.is_punct(__SCREAMING_SNAKE_CASE ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__SCREAMING_SNAKE_CASE = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__SCREAMING_SNAKE_CASE = i
if len(__SCREAMING_SNAKE_CASE ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
"""simple docstring"""
return self.vocab.get(__SCREAMING_SNAKE_CASE , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> int:
"""simple docstring"""
return self.reverse_vocab.get(__SCREAMING_SNAKE_CASE , self.unk_token )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> str:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Any=False ) -> List[str]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__SCREAMING_SNAKE_CASE ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__SCREAMING_SNAKE_CASE ) + 1) + [1] * (len(__SCREAMING_SNAKE_CASE ) + 3)
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__SCREAMING_SNAKE_CASE ) == 1:
__SCREAMING_SNAKE_CASE = unicodedata.category(__SCREAMING_SNAKE_CASE )
if cat == "Zs":
return True
return False
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
"""simple docstring"""
        token_to_idx = {}
        with io.open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip("""\n""" )
                token_to_idx[token] = int(index )
return token_to_idx
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 0
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
__SCREAMING_SNAKE_CASE = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
__SCREAMING_SNAKE_CASE = token_index
writer.write(token + """\n""" )
index += 1
__SCREAMING_SNAKE_CASE = os.path.join(__SCREAMING_SNAKE_CASE , """sentencepiece.bpe.model""" )
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
__SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (vocab_file,)
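
# Added illustration (hedged sketch, not part of the original tokenizer): the
# special-token layout built by the methods above is [CLS] X [SEP] for one
# sequence and [CLS] A [SEP] [SEP] B [SEP] for a pair. The ids below (cls=0,
# sep=1) are hypothetical and chosen only for the demonstration.
def _demo_pair_layout(ids_a, ids_b=None, cls_id=0, sep_id=1):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]

assert _demo_pair_layout([7, 8]) == [0, 7, 8, 1]
assert _demo_pair_layout([7, 8], [9]) == [0, 7, 8, 1, 1, 9, 1]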
| 368 |
'''simple docstring'''
def pancake_sort( arr ):
    """simple docstring"""
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi, bringing that maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the first cur elements, sinking the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
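
# Quick sanity check (added): each pass flips the current maximum to the front
# and then flips it into its final slot, so the suffix arr[cur:] stays sorted.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]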
if __name__ == "__main__":
UpperCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase : str = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 331 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : List[str] = {'vocab_file': 'spiece.model'}
UpperCAmelCase : Union[str, Any] = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
UpperCAmelCase : int = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
UpperCAmelCase : Tuple = 0
UpperCAmelCase : Optional[int] = 1
UpperCAmelCase : Optional[Any] = 2
UpperCAmelCase : Optional[int] = 3
UpperCAmelCase : List[Any] = 4
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = "left"
def __init__( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Tuple="<s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : List[Any]="<unk>" , __SCREAMING_SNAKE_CASE : Tuple="<sep>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Optional[int]="<cls>" , __SCREAMING_SNAKE_CASE : Optional[Any]="<mask>" , __SCREAMING_SNAKE_CASE : str=["<eop>", "<eod>"] , __SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
__SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = do_lower_case
__SCREAMING_SNAKE_CASE = remove_space
__SCREAMING_SNAKE_CASE = keep_accents
__SCREAMING_SNAKE_CASE = vocab_file
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return len(self.sp_model )
def UpperCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.__dict__.copy()
__SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
"""simple docstring"""
if self.remove_space:
__SCREAMING_SNAKE_CASE = """ """.join(inputs.strip().split() )
else:
__SCREAMING_SNAKE_CASE = inputs
__SCREAMING_SNAKE_CASE = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
__SCREAMING_SNAKE_CASE = unicodedata.normalize("""NFKD""" , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """""".join([c for c in outputs if not unicodedata.combining(__SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
__SCREAMING_SNAKE_CASE = outputs.lower()
return outputs
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.preprocess_text(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = []
for piece in pieces:
if len(__SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
__SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(piece[:-1].replace(__SCREAMING_SNAKE_CASE , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__SCREAMING_SNAKE_CASE = cur_pieces[1:]
else:
__SCREAMING_SNAKE_CASE = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__SCREAMING_SNAKE_CASE )
else:
new_pieces.append(__SCREAMING_SNAKE_CASE )
return new_pieces
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , """ """ ).strip()
return out_string
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : bool = True , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = kwargs.pop("""use_source_tokenizer""" , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = []
sub_texts.append(__SCREAMING_SNAKE_CASE )
else:
current_sub_text.append(__SCREAMING_SNAKE_CASE )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__SCREAMING_SNAKE_CASE ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__SCREAMING_SNAKE_CASE = """""".join(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__SCREAMING_SNAKE_CASE = self.clean_up_tokenization(__SCREAMING_SNAKE_CASE )
return clean_text
else:
return text
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1]
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1]
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__SCREAMING_SNAKE_CASE = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , """wb""" ) as fi:
__SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
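
# Added illustration (hedged): unlike BERT, XLNet places <sep> and <cls> at the
# END of the sequence, as the pair-building method above shows. The ids used
# here (sep=4, cls=3) follow the pretrained XLNet vocab but are assumptions
# made only for this demonstration.
def _demo_xlnet_layout(ids_a, ids_b=None, sep_id=4, cls_id=3):
    if ids_b is None:
        return ids_a + [sep_id, cls_id]
    return ids_a + [sep_id] + ids_b + [sep_id, cls_id]

assert _demo_xlnet_layout([7, 8]) == [7, 8, 4, 3]
assert _demo_xlnet_layout([7, 8], [9]) == [7, 8, 4, 9, 4, 3]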
| 369 |
'''simple docstring'''
import os
# Precomputes a list of the first 100 triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def solution():
    """simple docstring"""
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_directory , """words.txt""" )
    words = """"""
    with open(words_file_path ) as f:
        words = f.readline()
    words = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
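
# Worked example (added): "SKY" scores ord(S)+ord(K)+ord(Y) - 3*64
# = 19 + 11 + 25 = 55, and 55 = 10*11/2 is the 10th triangular number,
# so "SKY" counts as a triangular word.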
if __name__ == "__main__":
print(solution())
| 331 | 0 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCAmelCase : List[Any] = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
UpperCAmelCase : Optional[int] = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
UpperCAmelCase : Tuple = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=4 , __SCREAMING_SNAKE_CASE : Optional[int]=False ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = compute_bleu(
reference_corpus=__SCREAMING_SNAKE_CASE , translation_corpus=__SCREAMING_SNAKE_CASE , max_order=__SCREAMING_SNAKE_CASE , smooth=__SCREAMING_SNAKE_CASE )
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
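
# Minimal standalone illustration (added; independent of the metric class
# above): BLEU's modified n-gram precision clips each candidate n-gram count
# by its maximum count in the reference. Unigram case shown with toy data.
from collections import Counter

def _demo_clipped_unigram_precision(candidate, reference):
    cand, ref = Counter(candidate), Counter(reference)
    clipped = sum(min(n, ref[tok]) for tok, n in cand.items())
    return clipped / max(len(candidate), 1)

assert _demo_clipped_unigram_precision(["the", "the", "cat"], ["the", "cat"]) == 2 / 3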
| 370 |
'''simple docstring'''
class lowerCAmelCase__ : # Public class to implement a graph
    """simple docstring"""

    def __init__( self , row : int , col : int , graph : list[list[bool]] ) -> None:
        """simple docstring"""
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe( self , i : int , j : int , visited : list[list[bool]] ) -> bool:
        """simple docstring"""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs( self , i : int , j : int , visited : list[list[bool]] ) -> None:
        """simple docstring"""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )

    def count_islands( self ) -> int: # And finally, count all islands.
        """simple docstring"""
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
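
# Usage check (added): the classic example grid contains five 8-connected
# islands -- {(0,0),(0,1),(1,1),(2,0)}, {(1,4),(2,3),(2,4)}, and three
# isolated cells in the last row.
assert lowerCAmelCase__(
    5,
    5,
    [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ],
).count_islands() == 5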
| 331 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Tuple=99 , __SCREAMING_SNAKE_CASE : str=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : List[Any]=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=512 , __SCREAMING_SNAKE_CASE : Any=16 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : List[Any]=None , ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = 13
__SCREAMING_SNAKE_CASE = 7
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = 99
__SCREAMING_SNAKE_CASE = 384
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 37
__SCREAMING_SNAKE_CASE = """gelu"""
__SCREAMING_SNAKE_CASE = 0.1
__SCREAMING_SNAKE_CASE = 0.1
__SCREAMING_SNAKE_CASE = 512
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 0.02
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 128
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 9
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFConvBertModel(config=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__SCREAMING_SNAKE_CASE = [input_ids, input_mask]
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFConvBertForMaskedLM(config=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = TFConvBertForSequenceClassification(config=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = TFConvBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = TFConvBertForTokenClassification(config=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFConvBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
        ) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( a , a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFConvBertModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
if hasattr(__SCREAMING_SNAKE_CASE , """use_cache""" ):
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__SCREAMING_SNAKE_CASE = getattr(self.model_tester , """key_length""" , __SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(model(__SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__SCREAMING_SNAKE_CASE , saved_model=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = os.path.join(__SCREAMING_SNAKE_CASE , """saved_model""" , """1""" )
__SCREAMING_SNAKE_CASE = tf.keras.models.load_model(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__SCREAMING_SNAKE_CASE = outputs["""encoder_hidden_states"""]
__SCREAMING_SNAKE_CASE = outputs["""encoder_attentions"""]
else:
__SCREAMING_SNAKE_CASE = outputs["""hidden_states"""]
__SCREAMING_SNAKE_CASE = outputs["""attentions"""]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
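        # Note added for clarity (assumption based on ConvBERT's design): the
        # `/ 2` in the attention-shape checks reflects ConvBERT's default
        # head_ratio=2, which halves the self-attention heads in the mixed
        # attention block; the other half is span-based dynamic convolution.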
@slow
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__SCREAMING_SNAKE_CASE = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__SCREAMING_SNAKE_CASE = getattr(self.model_tester , """key_length""" , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = getattr(self.model_tester , """key_length""" , __SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(__SCREAMING_SNAKE_CASE : List[str] ):
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
__SCREAMING_SNAKE_CASE = outputs.decoder_attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__SCREAMING_SNAKE_CASE : Union[str, Any] ):
__SCREAMING_SNAKE_CASE = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(__SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , __SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
__SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = [1, 6, 768]
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
| 371 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=99 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : Dict=5 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Tuple=37 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=4 , ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_attention_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_choices
def UpperCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
__SCREAMING_SNAKE_CASE = jnp.array([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = 50_000
__SCREAMING_SNAKE_CASE = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
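
# Added illustration (hedged sketch): RoFormer's rotary position embedding,
# which the models under test rely on, rotates each 2-D feature pair by a
# position-dependent angle; numpy is already imported above as np.
def _demo_rotary_pair(x0, x1, pos, theta=1.0):
    c, s = np.cos(pos * theta), np.sin(pos * theta)
    return x0 * c - x1 * s, x0 * s + x1 * c

# Position 0 leaves the pair unchanged; later positions rotate it.
assert _demo_rotary_pair(1.0, 0.0, 0) == (1.0, 0.0)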
| 331 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Any = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "decision_transformer"
lowerCAmelCase__ = ["past_key_values"]
lowerCAmelCase__ = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=17 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : str=128 , __SCREAMING_SNAKE_CASE : List[str]=4_096 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Tuple=1_024 , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : List[str]=1 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict="relu" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=1E-5 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : str=50_256 , __SCREAMING_SNAKE_CASE : Union[str, Any]=50_256 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : str=False , **__SCREAMING_SNAKE_CASE : int , ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = state_dim
__SCREAMING_SNAKE_CASE = act_dim
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = max_ep_len
__SCREAMING_SNAKE_CASE = action_tanh
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = n_positions
__SCREAMING_SNAKE_CASE = n_layer
__SCREAMING_SNAKE_CASE = n_head
__SCREAMING_SNAKE_CASE = n_inner
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = resid_pdrop
__SCREAMING_SNAKE_CASE = embd_pdrop
__SCREAMING_SNAKE_CASE = attn_pdrop
__SCREAMING_SNAKE_CASE = layer_norm_epsilon
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = scale_attn_weights
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx
__SCREAMING_SNAKE_CASE = reorder_and_upcast_attn
__SCREAMING_SNAKE_CASE = bos_token_id
__SCREAMING_SNAKE_CASE = eos_token_id
super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
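        # Usage note (added; hedged): upstream this class is
        # DecisionTransformerConfig. A typical instantiation overrides the
        # environment-specific sizes, e.g. state_dim=17 and act_dim=6 for Gym
        # HalfCheetah, while the GPT-2-style fields keep the defaults above.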
| 350 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "markuplm"
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple=30_522 , __SCREAMING_SNAKE_CASE : Optional[Any]=768 , __SCREAMING_SNAKE_CASE : str=12 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : str=3_072 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1E-12 , __SCREAMING_SNAKE_CASE : str=0 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=256 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_024 , __SCREAMING_SNAKE_CASE : Dict=216 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_001 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : str=50 , __SCREAMING_SNAKE_CASE : int="absolute" , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : int=None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Tuple:
"""simple docstring"""
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
# additional properties
__SCREAMING_SNAKE_CASE = max_depth
__SCREAMING_SNAKE_CASE = max_xpath_tag_unit_embeddings
__SCREAMING_SNAKE_CASE = max_xpath_subs_unit_embeddings
__SCREAMING_SNAKE_CASE = tag_pad_id
__SCREAMING_SNAKE_CASE = subs_pad_id
__SCREAMING_SNAKE_CASE = xpath_unit_hidden_size
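        # Added note (hedged): the xpath fields above size MarkupLM's extra
        # embeddings -- each HTML node is encoded as up to max_depth
        # (tag_id, subscript) units along its XPath, with tag_pad_id and
        # subs_pad_id filling unused depth slots, much as token pads fill
        # unused sequence positions.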
| 331 | 0 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit = 1_00_00_00 , n_limit = 10 ):
    """simple docstring"""
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
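
# Worked check (added): a square lamina with outer side o and hole side h uses
# o*o - h*h tiles, and o and h must share parity for a uniform border. The
# smallest lamina (o=3, h=1) uses 8 tiles and is the only type using <= 8,
# so solution(8) == 1.
assert 3 * 3 - 1 * 1 == 8
assert solution(8) == 1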
if __name__ == "__main__":
print(f"""{solution() = }""")
| 351 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
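
# Background sketch (added; a simplified assumption, not the real _LazyModule):
# the lazy-import pattern above defers heavy submodule imports until first
# attribute access, roughly:
#
#     class _Lazy(types.ModuleType):
#         def __getattr__(self, name):
#             submodule = _name_to_submodule[name]
#             module = importlib.import_module("." + submodule, __package__)
#             return getattr(module, name)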
| 331 | 0 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
UpperCAmelCase : Any = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
UpperCAmelCase : Optional[Any] = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
UpperCAmelCase : Optional[int] = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase : List[str] = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase : List[Any] = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def a__ ( a__ , a__ ):
"""simple docstring"""
for tf_name, hf_name in patterns:
__SCREAMING_SNAKE_CASE = k.replace(a__ , a__ )
return k
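# A quick illustration of the renaming this helper is meant to perform
# (standalone sketch; the sample key is made up, the patterns mirror
# INIT_COMMON above).
patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
key = "encoder/layer_0/kernel"
for tf_name, hf_name in patterns:
    key = key.replace(tf_name, hf_name)
assert key == "encoder.layers.0.weight"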
def a__ ( a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = BigBirdPegasusConfig(**a__ )
__SCREAMING_SNAKE_CASE = BigBirdPegasusForConditionalGeneration(a__ )
__SCREAMING_SNAKE_CASE = torch_model.state_dict()
__SCREAMING_SNAKE_CASE = {}
# separating decoder weights
__SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
__SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
__SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE]
if any(a__ ):
continue
__SCREAMING_SNAKE_CASE = DECODER_PATTERNS
__SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ )
if new_k not in state_dict:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ["""dense""", """query""", """key""", """value"""] ):
__SCREAMING_SNAKE_CASE = v.T
__SCREAMING_SNAKE_CASE = torch.from_numpy(a__ )
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
__SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE]
if any(a__ ):
continue
__SCREAMING_SNAKE_CASE = REMAINING_PATTERNS
__SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ["""dense""", """query""", """key""", """value"""] ):
__SCREAMING_SNAKE_CASE = v.T
__SCREAMING_SNAKE_CASE = torch.from_numpy(a__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
__SCREAMING_SNAKE_CASE = mapping["""model.embed_positions.weight"""]
__SCREAMING_SNAKE_CASE = mapping.pop("""model.embed_positions.weight""" )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch_model.load_state_dict(a__ , strict=a__ )
__SCREAMING_SNAKE_CASE = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
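# Why the transposes above are needed, in isolation: TF stores dense
# kernels as [in_features, out_features] while torch.nn.Linear.weight is
# [out_features, in_features]. The sizes below are illustrative only.
import numpy as np
import torch

tf_kernel = np.zeros((1024, 4096), dtype=np.float32)  # TF layout: [in, out]
linear = torch.nn.Linear(1024, 4096)                  # weight: [out, in]
assert torch.from_numpy(tf_kernel.T).shape == linear.weight.shape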
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tf.train.list_variables(a__ )
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = ["""global_step"""]
for name, shape in tqdm(a__ , desc="""converting tf checkpoint to dict""" ):
__SCREAMING_SNAKE_CASE = any(pat in name for pat in ignore_name )
if skip_key:
continue
__SCREAMING_SNAKE_CASE = tf.train.load_variable(a__ , a__ )
__SCREAMING_SNAKE_CASE = array
return tf_weights
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_tf_weights_as_numpy(a__ )
__SCREAMING_SNAKE_CASE = convert_bigbird_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
if __name__ == "__main__":
UpperCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase : int = parser.parse_args()
UpperCAmelCase : Dict = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 352 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2, 4], [1, 2, 3, 4]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(3 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
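# A toy model of the behaviour these tests exercise (a sketch only: the
# real DisjunctiveConstraint walks a trie and supports more operations;
# this mimics just the (stepped, completed, reset) triple for prefixes).
class ToyDisjunctive:
    def __init__(self, seqs):
        self.seqs = [list(s) for s in seqs]
        self.current = []

    def update(self, token):
        candidate = self.current + [token]
        live = [s for s in self.seqs if s[: len(candidate)] == candidate]
        if not live:                   # token breaks every branch -> reset
            self.current = []
            return False, False, True  # (stepped, completed, reset)
        self.current = candidate
        return True, candidate in live, False

dc = ToyDisjunctive([[1, 2, 3], [1, 2, 4]])
assert dc.update(1) == (True, False, False)
assert dc.update(2) == (True, False, False)
assert dc.update(3) == (True, True, False)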
| 331 | 0 |
'''simple docstring'''
def a__ ( a__ , a__ , a__ , a__ , a__ , a__ ):
"""simple docstring"""
if index == r:
for j in range(a__ ):
print(data[j] , end=""" """ )
print(""" """ )
return
    # No elements remain to place into data[]
if i >= n:
return
# current is included, put next at next location
__SCREAMING_SNAKE_CASE = arr[i]
combination_util(a__ , a__ , a__ , index + 1 , a__ , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(a__ , a__ , a__ , a__ , a__ , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(a__ , a__ , a__ , 0 , a__ , 0 )
if __name__ == "__main__":
# Driver code to check the function above
UpperCAmelCase : List[Any] = [1_0, 2_0, 3_0, 4_0, 5_0]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
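# The standard library covers the same ground; the recursive routine
# above is equivalent to itertools.combinations for this input.
from itertools import combinations

for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)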
| 353 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Any=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : Optional[int]=37 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[str]="None" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = relative_attention
__SCREAMING_SNAKE_CASE = position_biased_input
__SCREAMING_SNAKE_CASE = pos_att_type
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = 300
return config
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DebertaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DebertaForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
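# What the final allclose call compares, in isolation: a 3x3 slice of the
# hidden states at positions 1-3, features 1-3, with a 1e-4 absolute
# tolerance (toy zero tensors below; real expected values are model-specific).
import torch

output = torch.zeros(1, 11, 768)  # (batch, seq_len, hidden_size)
expected = torch.zeros(1, 3, 3)
assert output[:, 1:4, 1:4].shape == expected.shape
assert torch.allclose(output[:, 1:4, 1:4], expected, atol=1e-4)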
| 331 | 0 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase : Optional[int] = re.compile(R'\s+')
def a__ ( a__ ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(a__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [len(a__ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(a__ ), "line_max": max(a__ )}
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def a__ ( a__ , a__ ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def a__ ( a__ , a__=5 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ["""auto-generated""", """autogenerated""", """automatically generated"""]
__SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
for _, line in zip(range(a__ ) , a__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a__ ( a__ , a__=5 , a__=0.05 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ["""unit tests""", """test file""", """configuration file"""]
__SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
# first test
for _, line in zip(range(a__ ) , a__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
__SCREAMING_SNAKE_CASE = example["""content"""].count("""\n""" )
__SCREAMING_SNAKE_CASE = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ["""def """, """class """, """for """, """while """]
__SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a__ ( a__ , a__=4 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
__SCREAMING_SNAKE_CASE = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tokenizer(example["""content"""] , truncation=a__ )["""input_ids"""]
__SCREAMING_SNAKE_CASE = len(example["""content"""] ) / len(a__ )
return {"ratio": ratio}
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
results.update(get_hash(a__ ) )
results.update(line_stats(a__ ) )
results.update(alpha_stats(a__ ) )
results.update(char_token_ratio(a__ ) )
results.update(is_autogenerated(a__ ) )
results.update(is_config_or_test(a__ ) )
results.update(has_no_keywords(a__ ) )
results.update(has_few_assignments(a__ ) )
return results
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
if not check_uniques(a__ , a__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a__ ( a__ ):
"""simple docstring"""
with open(a__ , """rb""" ) as f_in:
with gzip.open(str(a__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(a__ , a__ )
os.unlink(a__ )
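# Toy end-to-end illustration of the exact-dedup scheme above: hash the
# whitespace-stripped content, then keep only the first row per hash
# (standalone sketch; field handling is simplified to plain strings).
import hashlib
import re

PATTERN = re.compile(r"\s+")

def content_hash(text: str) -> str:
    return hashlib.md5(PATTERN.sub("", text).encode("utf-8")).hexdigest()

rows = ["def f():\n    pass", "def f():  \n\tpass", "print(1)"]
uniques = {content_hash(t) for t in rows}
kept = []
for t in rows:
    h = content_hash(t)
    if h in uniques:       # first occurrence wins, later duplicates drop out
        uniques.remove(h)
        kept.append(t)
assert len(kept) == 2      # the two whitespace variants collapse into one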
# Settings
UpperCAmelCase : Dict = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase : int = multiprocessing.cpu_count()
UpperCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase : List[Any] = time.time()
UpperCAmelCase : str = load_dataset(args.dataset_name, split='train')
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
UpperCAmelCase : int = time.time()
UpperCAmelCase : List[Any] = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
UpperCAmelCase : Optional[Any] = set(ds.unique('hash'))
UpperCAmelCase : Dict = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
UpperCAmelCase : Optional[int] = time.time()
UpperCAmelCase : Tuple = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase : Dict = time.time()
UpperCAmelCase : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
UpperCAmelCase : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase : Union[str, Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase : str = str(data_dir / f"""file-{file_number+1:012}.json""")
UpperCAmelCase : List[str] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 354 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = analyze_text(a__ )
__SCREAMING_SNAKE_CASE = list(""" """ + ascii_lowercase )
    # total count of single-character occurrences.
__SCREAMING_SNAKE_CASE = sum(single_char_strings.values() )
    # single-character strings
__SCREAMING_SNAKE_CASE = 0
    # for each alphabet character present in the dict, accumulate its entropy term
for ch in my_alphas:
if ch in single_char_strings:
__SCREAMING_SNAKE_CASE = single_char_strings[ch]
__SCREAMING_SNAKE_CASE = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
# print entropy
print(F'{round(-1 * my_fir_sum ):.1f}' )
    # two-character strings
__SCREAMING_SNAKE_CASE = sum(two_char_strings.values() )
__SCREAMING_SNAKE_CASE = 0
# for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            __SCREAMING_SNAKE_CASE = cha + chb
            if sequence in two_char_strings:
                __SCREAMING_SNAKE_CASE = two_char_strings[sequence]
                __SCREAMING_SNAKE_CASE = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
# print second entropy
print(F'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Counter() # type: ignore
__SCREAMING_SNAKE_CASE = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(a__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
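# A worked first-order example of the formula used above,
# H = -sum(p(c) * log2 p(c)), on a three-character string.
import math
from collections import Counter

text = "aab"
freqs = Counter(text)
total = sum(freqs.values())
entropy = -sum((n / total) * math.log2(n / total) for n in freqs.values())
assert abs(entropy - 0.9183) < 1e-3  # p(a) = 2/3, p(b) = 1/3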
def a__ ( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 331 | 0 |
'''simple docstring'''
def a__ ( a__ ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def a__ ( a__ ):
"""simple docstring"""
return x + 2
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """x = 3"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} )
__SCREAMING_SNAKE_CASE = """x = y"""
__SCREAMING_SNAKE_CASE = {"""y""": 5}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 5, """y""": 5} )
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """y = add_two(x)"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} )
# Won't work without the tool
with CaptureStdout() as out:
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """x = 3"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} )
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """x = 3\ny = 5"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """text = f'This is x: {x}.'"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """text""": """This is x: 3."""} )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """if x <= 3:\n y = 2\nelse:\n y = 5"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 2} )
__SCREAMING_SNAKE_CASE = {"""x""": 8}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 8, """y""": 5} )
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [3, 5] )
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} )
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """y = x"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE )
assert result == 3
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 3} )
def UpperCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]\ntest_list[1]"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} )
__SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
__SCREAMING_SNAKE_CASE = {"""x""": 3}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE )
assert result == 5
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """x = 0\nfor i in range(3):\n x = i"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""range""": range} , state=__SCREAMING_SNAKE_CASE )
assert result == 2
self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 2, """i""": 2} )
| 331 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase : Dict = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
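# The gist of the lazy-import machinery above, reduced to module-level
# __getattr__ (PEP 562). A sketch only: the real _LazyModule also caches
# resolved modules and wires up __dir__ for introspection.
# lazy_mod.py
import importlib

_import_structure = {"math": ["sqrt", "pi"]}

def __getattr__(name):
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
# Importing lazy_mod is instant; the first access to lazy_mod.sqrt is what
# actually imports math.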
| 356 |
'''simple docstring'''
import os
def a__ ( a__ = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(a__ ) , a__ ) ) as input_file:
__SCREAMING_SNAKE_CASE = [
[int(a__ ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
__SCREAMING_SNAKE_CASE = len(a__ )
__SCREAMING_SNAKE_CASE = len(matrix[0] )
__SCREAMING_SNAKE_CASE = [[-1 for _ in range(a__ )] for _ in range(a__ )]
for i in range(a__ ):
__SCREAMING_SNAKE_CASE = matrix[i][0]
for j in range(1 , a__ ):
for i in range(a__ ):
__SCREAMING_SNAKE_CASE = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , a__ ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 331 | 0 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(a__ ) == len(a__ ), F'{len(a__ )} != {len(a__ )}'
dest_layers.load_state_dict(layers_to_copy.state_dict() )
UpperCAmelCase : List[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
UpperCAmelCase : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def a__ ( a__ , a__ ):
"""simple docstring"""
try:
__SCREAMING_SNAKE_CASE = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
F' {n_student}' )
return list(range(a__ ) )
def a__ ( a__ , a__ ):
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
elif n_teacher == n_student:
return list(range(a__ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
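# Spot-checks of the selection logic above, with values read straight from
# the hardcoded tables in this file.
assert LAYERS_TO_COPY[12][3] == [0, 6, 11]       # 12-layer teacher, 3-layer student
assert LAYERS_TO_SUPERVISE[12][3] == [3, 7, 11]  # teacher layers used to supervise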
def a__ ( a__ , a__ = "student" , a__ = None , a__ = None , a__=False , a__=None , a__=None , **a__ , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
assert (e is not None) or (d is not None), _msg
if isinstance(a__ , a__ ):
AutoTokenizer.from_pretrained(a__ ).save_pretrained(a__ ) # purely for convenience
__SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(a__ ).eval()
else:
assert isinstance(a__ , a__ ), F'teacher must be a model or string got type {type(a__ )}'
__SCREAMING_SNAKE_CASE = teacher.config.to_diff_dict()
try:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__SCREAMING_SNAKE_CASE = teacher_e
if d is None:
__SCREAMING_SNAKE_CASE = teacher_d
init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} )
except AttributeError: # T5
if hasattr(teacher.config , """num_encoder_layers""" ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__SCREAMING_SNAKE_CASE = teacher_e
if d is None:
__SCREAMING_SNAKE_CASE = teacher_d
if hasattr(teacher.config , """num_encoder_layers""" ):
init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} )
else:
init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(a__ )
# Copy weights
__SCREAMING_SNAKE_CASE = teacher.config_class(**a__ )
__SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_config(a__ )
    # Start by copying the full teacher state dict; this copies the first N teacher layers to the student.
__SCREAMING_SNAKE_CASE = student.load_state_dict(teacher.state_dict() , strict=a__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = list(range(a__ ) ), list(range(a__ ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(a__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__SCREAMING_SNAKE_CASE = pick_layers_to_copy(a__ , a__ )
if d_layers_to_copy is None:
__SCREAMING_SNAKE_CASE = pick_layers_to_copy(a__ , a__ )
try:
if hasattr(
a__ , """prophetnet""" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , a__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , a__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , a__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , a__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , a__ )
copy_layers(teacher.decoder.block , student.decoder.block , a__ )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
__SCREAMING_SNAKE_CASE = {
"""teacher_type""": teacher.config.model_type,
"""copied_encoder_layers""": e_layers_to_copy,
"""copied_decoder_layers""": d_layers_to_copy,
}
student.save_pretrained(a__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 357 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCAmelCase : Any = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
UpperCAmelCase : Optional[Any] = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCAmelCase : Dict = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCAmelCase : Optional[Any] = sorted(arg_to_scheduler.keys())
UpperCAmelCase : str = '{' + ', '.join(arg_to_scheduler_choices) + '}'
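# How the scheduler registry above is typically consumed (a sketch with a
# dummy parameter and illustrative warmup/step counts).
import torch

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.AdamW(params, lr=5e-5)
get_schedule = arg_to_scheduler["linear"]  # -> get_linear_schedule_with_warmup
scheduler = get_schedule(optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(3):
    optimizer.step()
    scheduler.step()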
class lowerCAmelCase__ ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : argparse.Namespace , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict="base" , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Any:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = Path(self.hparams.output_dir )
__SCREAMING_SNAKE_CASE = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = config
__SCREAMING_SNAKE_CASE = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(self.hparams , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
assert hasattr(self.config , __SCREAMING_SNAKE_CASE ), f'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __SCREAMING_SNAKE_CASE , getattr(self.hparams , __SCREAMING_SNAKE_CASE ) )
if tokenizer is None:
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = tokenizer
__SCREAMING_SNAKE_CASE = MODEL_MODES[mode]
if model is None:
__SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = model
def UpperCAmelCase__ ( self : List[str] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = arg_to_scheduler[self.hparams.lr_scheduler]
__SCREAMING_SNAKE_CASE = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__SCREAMING_SNAKE_CASE = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
return scheduler
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model
__SCREAMING_SNAKE_CASE = ["""bias""", """LayerNorm.weight"""]
__SCREAMING_SNAKE_CASE = [
{
"""params""": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
            ], # check these named parameters
"""weight_decay""": self.hparams.weight_decay,
},
{
"""params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
if self.hparams.adafactor:
__SCREAMING_SNAKE_CASE = Adafactor(
__SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=__SCREAMING_SNAKE_CASE , relative_step=__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = AdamW(
__SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__SCREAMING_SNAKE_CASE = optimizer
__SCREAMING_SNAKE_CASE = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
return self.validation_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.validation_end(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__SCREAMING_SNAKE_CASE = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
"""simple docstring"""
if stage == "test":
__SCREAMING_SNAKE_CASE = len(self.test_dataloader().dataset )
else:
__SCREAMING_SNAKE_CASE = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(self.train_dataloader().dataset )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> int:
"""simple docstring"""
raise NotImplementedError("""You must implement this for your task""" )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.train_loader
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , """cached_{}_{}_{}""".format(
__SCREAMING_SNAKE_CASE , list(filter(__SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict[str, Any] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.output_dir.joinpath("""best_tfmr""" )
__SCREAMING_SNAKE_CASE = self.step_count
self.model.save_pretrained(__SCREAMING_SNAKE_CASE )
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
@staticmethod
def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--config_name""" , default="""""" , type=__SCREAMING_SNAKE_CASE , help="""Pretrained config name or path if not the same as model_name""" )
parser.add_argument(
"""--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument(
"""--cache_dir""" , default=str(Path(__SCREAMING_SNAKE_CASE ).parent / """test_run""" / """cache""" ) , type=__SCREAMING_SNAKE_CASE , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
parser.add_argument(
"""--encoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--decoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--attention_dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Attention dropout probability (Optional). Goes into model.config""" , )
parser.add_argument("""--learning_rate""" , default=5E-5 , type=__SCREAMING_SNAKE_CASE , help="""The initial learning rate for Adam.""" )
parser.add_argument(
"""--lr_scheduler""" , default="""linear""" , choices=__SCREAMING_SNAKE_CASE , metavar=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Learning rate scheduler""" , )
parser.add_argument("""--weight_decay""" , default=0.0 , type=__SCREAMING_SNAKE_CASE , help="""Weight decay if we apply some.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--warmup_steps""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--num_workers""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""kwarg passed to DataLoader""" )
parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--train_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--eval_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--adafactor""" , action="""store_true""" )
class lowerCAmelCase__ ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY. In newer pytorch-lightning releases, accelerators were removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowerCAmelCase__ ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( pl.Callback ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = trainer.lr_schedulers[0]["""scheduler"""]
__SCREAMING_SNAKE_CASE = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> List[Any]:
"""simple docstring"""
rank_zero_info("""***** Validation results *****""" )
__SCREAMING_SNAKE_CASE = trainer.callback_metrics
# Log results
for key in sorted(__SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> str:
"""simple docstring"""
rank_zero_info("""***** Test results *****""" )
__SCREAMING_SNAKE_CASE = trainer.callback_metrics
# Log and save results to file
__SCREAMING_SNAKE_CASE = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
with open(__SCREAMING_SNAKE_CASE , """w""" ) as writer:
for key in sorted(__SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
writer.write("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def a__ ( a__ , a__ ):
"""simple docstring"""
parser.add_argument(
"""--output_dir""" , default=str(Path(a__ ).parent / """test_run""" / """model_checkpoints""" ) , type=a__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=a__ , default="""O2""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=a__ )
parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=a__ , help="""Max gradient norm""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
parser.add_argument(
"""--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=a__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--seed""" , type=a__ , default=42 , help="""random seed for initialization""" )
parser.add_argument(
"""--data_dir""" , default=str(Path(a__ ).parent / """test_run""" / """dummy-train-data""" ) , type=a__ , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def a__ ( a__ , a__ , a__=None , a__=True , a__=[] , a__=None , a__=None , **a__ , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__SCREAMING_SNAKE_CASE = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=a__ )
# add custom checkpoints
if checkpoint_callback is None:
__SCREAMING_SNAKE_CASE = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(a__ )
if logging_callback is None:
__SCREAMING_SNAKE_CASE = LoggingCallback()
__SCREAMING_SNAKE_CASE = {}
if args.fp16:
__SCREAMING_SNAKE_CASE = 16
if args.gpus > 1:
__SCREAMING_SNAKE_CASE = """auto"""
__SCREAMING_SNAKE_CASE = """ddp"""
__SCREAMING_SNAKE_CASE = args.accumulate_grad_batches
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = """auto"""
__SCREAMING_SNAKE_CASE = pl.Trainer.from_argparse_args(
a__ , weights_summary=a__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=a__ , val_check_interval=1 , num_sanity_val_steps=2 , **a__ , )
if args.do_train:
trainer.fit(a__ )
else:
print("""RAG modeling tests with new set functions successfuly executed!""" )
return trainer
| 331 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = ["audio_values", "audio_mask"]
def __init__( self : str , __SCREAMING_SNAKE_CASE : Tuple=2_048 , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=[16, 16] , __SCREAMING_SNAKE_CASE : Any=128 , __SCREAMING_SNAKE_CASE : Tuple=44_100 , __SCREAMING_SNAKE_CASE : Union[str, Any]=86 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2_048 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = spectrogram_length
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = feature_size // self.patch_size[1]
__SCREAMING_SNAKE_CASE = n_fft
__SCREAMING_SNAKE_CASE = sampling_rate // hop_length_to_sampling_rate
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = padding_value
__SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm="""slaney""" , mel_scale="""slaney""" , ).T
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
__SCREAMING_SNAKE_CASE = log_spec[:, :-1]
__SCREAMING_SNAKE_CASE = log_spec - 20.0
__SCREAMING_SNAKE_CASE = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : int , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
f' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__SCREAMING_SNAKE_CASE = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__SCREAMING_SNAKE_CASE = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
__SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.float32 )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
__SCREAMING_SNAKE_CASE = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__SCREAMING_SNAKE_CASE = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.float32 ) for feature in audio_features]
# Create audio attention mask
__SCREAMING_SNAKE_CASE = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__SCREAMING_SNAKE_CASE = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ).astype(np.float32 )
# convert into correct format for padding
__SCREAMING_SNAKE_CASE = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__SCREAMING_SNAKE_CASE = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
__SCREAMING_SNAKE_CASE = padded_audio_features * self.padding_value
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
__SCREAMING_SNAKE_CASE = audio_features[i]
__SCREAMING_SNAKE_CASE = feature
# return as BatchFeature
if return_attention_mask:
__SCREAMING_SNAKE_CASE = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
__SCREAMING_SNAKE_CASE = {"""audio_values""": padded_audio_features}
__SCREAMING_SNAKE_CASE = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
return encoded_inputs
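# Hedged illustration (toy values, standalone, no audio files needed) of the dB
# post-processing in _np_extract_fbank_features above: subtract 20 dB, divide
# by 40, clip to [-2, 0], shift by +1, which maps every frame into [-1.0, 1.0].
import numpy as np

def _scale_db(log_spec):
    log_spec = log_spec - 20.0
    return np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0

print(_scale_db(np.array([-120.0, -60.0, 20.0, 80.0])))
# [-1. -1.  1.  1.] -- quiet frames saturate at -1, loud frames at +1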
| 358 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = (DDPMScheduler,)
def UpperCAmelCase__ ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__SCREAMING_SNAKE_CASE = pred_prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="""v_prediction""" )
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__SCREAMING_SNAKE_CASE = pred_prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
if i == len(__SCREAMING_SNAKE_CASE ) - 1:
__SCREAMING_SNAKE_CASE = -1
else:
__SCREAMING_SNAKE_CASE = timesteps[i + 1]
__SCREAMING_SNAKE_CASE = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__SCREAMING_SNAKE_CASE , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
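# Hedged numeric sketch, recomputed from scratch, of the "fixed_small" posterior
# variance the variance test above asserts:
#     var(t) = beta_t * (1 - alphabar_{t-1}) / (1 - alphabar_t)
# under the default linear beta schedule (0.0001 to 0.02 over 1000 steps).
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def _ddpm_variance(t):
    alpha_prod_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return (betas[t] * (1 - alpha_prod_prev) / (1 - alphas_cumprod[t])).item()

print(round(_ddpm_variance(0), 5), round(_ddpm_variance(487), 5), round(_ddpm_variance(999), 5))
# 0.0 0.00979 0.02 -- matching the three assertions in the variance test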
| 331 | 0 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( a__ , a__ , a__=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
__SCREAMING_SNAKE_CASE = nn.Parameter(a__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
__SCREAMING_SNAKE_CASE = nn.Parameter(a__ )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
__SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
__SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(a__ ).transpose(1 , 2 ).contiguous().view(-1 , a__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(a__ ).transpose(1 , 2 ).contiguous().view(-1 , a__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(a__ ).view(-1 , a__ ).contiguous().transpose(0 , 1 ) , )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = np.asarray(weights[0] )
__SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
__SCREAMING_SNAKE_CASE = np.asarray(weights[2] )
__SCREAMING_SNAKE_CASE = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(a__ ).transpose(1 , 2 ).contiguous().view(-1 , a__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(a__ ).transpose(1 , 2 ).contiguous().view(-1 , a__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(a__ ).transpose(1 , 2 ).contiguous().view(-1 , a__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(a__ ).view(-1 , a__ ).contiguous().transpose(0 , 1 ) , )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = weights[0][0][0]
__SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[0] )
__SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(a__ ) , torch.tensor(a__ ) , )
# lsh weights + output
__SCREAMING_SNAKE_CASE = weights[0][1]
if len(a__ ) < 4:
set_layer_weights_in_torch_lsh(a__ , torch_block.attention , a__ )
else:
set_layer_weights_in_torch_local(a__ , torch_block.attention , a__ )
# intermediate weights
__SCREAMING_SNAKE_CASE = weights[2][0][1][2]
# Chunked Feed Forward
if len(a__ ) == 4:
__SCREAMING_SNAKE_CASE = intermediate_weights[2]
# layernorm 2
__SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][0] )
__SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(a__ ) , torch.tensor(a__ ) , )
# intermediate dense
__SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][0] )
__SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(a__ ).transpose(0 , 1 ).contiguous() , torch.tensor(a__ ) , )
# intermediate out
__SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][0] )
__SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(a__ ).transpose(0 , 1 ).contiguous() , torch.tensor(a__ ) , )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = torch_model.reformer
# word embeds
__SCREAMING_SNAKE_CASE = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(a__ ) , )
if isinstance(weights[3] , a__ ):
__SCREAMING_SNAKE_CASE = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__SCREAMING_SNAKE_CASE = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings.weights[emb_idx]} emb does not match'
__SCREAMING_SNAKE_CASE = nn.Parameter(torch.tensor(a__ ) )
__SCREAMING_SNAKE_CASE = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
a__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__SCREAMING_SNAKE_CASE = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(a__ , a__ , a__ )
# output layer norm
__SCREAMING_SNAKE_CASE = np.asarray(weights[7][0] )
__SCREAMING_SNAKE_CASE = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(a__ ) , torch.tensor(a__ ) , )
# output embeddings
__SCREAMING_SNAKE_CASE = np.asarray(weights[9][0] )
__SCREAMING_SNAKE_CASE = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(a__ ).transpose(0 , 1 ).contiguous() , torch.tensor(a__ ) , )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ReformerConfig.from_json_file(a__ )
print(F'Building PyTorch model from configuration: {config}' )
__SCREAMING_SNAKE_CASE = ReformerModelWithLMHead(a__ )
with open(a__ , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = pickle.load(a__ )["""weights"""]
set_model_weights_in_torch(a__ , a__ , config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , a__ )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the trax model pickle (.pkl) checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCAmelCase : Dict = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
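# Hedged, standalone sketch of the copy pattern set_param implements above:
# shape-check a numpy weight, then wrap it in an nn.Parameter. The layer sizes
# here are made up for illustration.
import numpy as np
import torch
from torch import nn

layer = nn.Linear(4, 3, bias=False)
weights = np.ones((3, 4), dtype=np.float32)
assert layer.weight.shape == torch.Size(weights.shape), "layer.weight does not match"
layer.weight = nn.Parameter(torch.tensor(weights))
print(tuple(layer.weight.shape))  # (3, 4)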
| 359 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase : Dict = TypeVar('T')
def a__ ( a__ ):
"""simple docstring"""
return (position - 1) // 2
def a__ ( a__ ):
"""simple docstring"""
return (2 * position) + 1
def a__ ( a__ ):
"""simple docstring"""
return (2 * position) + 2
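# Quick hedged sanity check of the array-backed heap indexing above, using a
# local re-definition of the parent helper (the originals are bound to
# placeholder names in this file): node p has children at 2p+1 and 2p+2, and
# both map back to parent p.
def _parent(pos):
    return (pos - 1) // 2

for p in range(10):
    assert _parent(2 * p + 1) == p and _parent(2 * p + 2) == p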
class lowerCAmelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[str] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = 0
def __len__( self : Optional[Any] ) -> int:
"""simple docstring"""
return self.elements
def __repr__( self : List[str] ) -> str:
"""simple docstring"""
return str(self.heap )
def UpperCAmelCase__ ( self : Tuple ) -> bool:
"""simple docstring"""
return self.elements == 0
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
self.heap.append((elem, weight) )
__SCREAMING_SNAKE_CASE = self.elements
self.elements += 1
self._bubble_up(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> T:
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[0]
self._bubble_down(__SCREAMING_SNAKE_CASE )
return elem
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.position_map[elem]
__SCREAMING_SNAKE_CASE = (elem, weight)
if position > 0:
__SCREAMING_SNAKE_CASE = get_parent_position(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : T ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.position_map[elem]
if curr_pos == 0:
return None
__SCREAMING_SNAKE_CASE = get_parent_position(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[curr_pos]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_up(__SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.position_map[elem]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[curr_pos]
__SCREAMING_SNAKE_CASE = get_child_left_position(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = get_child_right_position(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements and child_right_position < self.elements:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_left_position]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
return None
if child_right_position < self.elements:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.heap[nodea_pos][0]
__SCREAMING_SNAKE_CASE = self.heap[nodea_pos][0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__SCREAMING_SNAKE_CASE = nodea_pos
__SCREAMING_SNAKE_CASE = nodea_pos
class lowerCAmelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = 0
def __repr__( self : Dict ) -> str:
"""simple docstring"""
return str(self.connections )
def __len__( self : Dict ) -> int:
"""simple docstring"""
return self.nodes
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None:
"""simple docstring"""
if node not in self.connections:
__SCREAMING_SNAKE_CASE = {}
self.nodes += 1
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
self.add_node(__SCREAMING_SNAKE_CASE )
self.add_node(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = weight
__SCREAMING_SNAKE_CASE = weight
def a__ ( a__ , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {node: maxsize for node in graph.connections}
__SCREAMING_SNAKE_CASE = {node: None for node in graph.connections}
__SCREAMING_SNAKE_CASE = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(a__ , a__ )
if priority_queue.is_empty():
return dist, parent
# initialization
__SCREAMING_SNAKE_CASE = priority_queue.extract_min()
__SCREAMING_SNAKE_CASE = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(a__ , dist[neighbour] )
__SCREAMING_SNAKE_CASE = node
# running prim's algorithm
while not priority_queue.is_empty():
__SCREAMING_SNAKE_CASE = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(a__ , dist[neighbour] )
__SCREAMING_SNAKE_CASE = node
return dist, parent
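# Hedged, stdlib-only restatement of the same minimum-spanning-tree idea for a
# concrete check (the classes above carry placeholder names, so this sketch
# re-implements a lazy-deletion Prim with heapq instead of the indexed queue):
import heapq

def _prim_mst_weight(graph):
    start = next(iter(graph))
    seen = {start}
    frontier = [(w, n) for n, w in graph[start].items()]
    heapq.heapify(frontier)
    total = 0
    while frontier:
        w, n = heapq.heappop(frontier)
        if n in seen:
            continue
        seen.add(n)
        total += w
        for m, wm in graph[n].items():
            if m not in seen:
                heapq.heappush(frontier, (wm, m))
    return total

triangle = {"a": {"b": 1, "c": 9}, "b": {"a": 1, "c": 2}, "c": {"a": 9, "b": 2}}
print(_prim_mst_weight(triangle))  # 3 -- edges a-b (1) and b-c (2); the 9-edge is skipped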
| 331 | 0 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
UpperCAmelCase : int = TypeVar('T')
UpperCAmelCase : Optional[int] = Union[List[T], Tuple[T, ...]]
UpperCAmelCase : Optional[int] = Union[T, List[T], Dict[str, T]]
UpperCAmelCase : Optional[Any] = Union[str, bytes, os.PathLike]
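# Hedged illustration with local re-definitions (the module above binds these
# aliases to placeholder names): the nested alias admits a bare value, a list,
# or a dict of values, so consumers typically normalize to a list.
from typing import Dict, List, TypeVar, Union

T = TypeVar("T")
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]

def _as_list(data):
    if isinstance(data, dict):
        return list(data.values())
    return data if isinstance(data, list) else [data]

print(_as_list(1), _as_list([1, 2]), _as_list({"a": 3}))  # [1] [1, 2] [3]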
| 360 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
if a == 0:
raise ValueError("""Coefficient 'a' must not be zero.""" )
__SCREAMING_SNAKE_CASE = b * b - 4 * a * c
__SCREAMING_SNAKE_CASE = (-b + sqrt(a__ )) / (2 * a)
__SCREAMING_SNAKE_CASE = (-b - sqrt(a__ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
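# Hedged worked example with a local re-definition (the solver above is bound
# to a placeholder name): for x^2 - 3x + 2 the discriminant is 9 - 8 = 1, so
# the roots are (3 +/- 1) / 2, i.e. 2 and 1.
from cmath import sqrt as _csqrt

def _roots(a, b, c):
    d = b * b - 4 * a * c
    return ((-b + _csqrt(d)) / (2 * a), (-b - _csqrt(d)) / (2 * a))

print(_roots(1, -3, 2))  # ((2+0j), (1+0j)) -> x^2 - 3x + 2 == (x - 2)(x - 1)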
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = quadratic_roots(a=5 , b=6 , c=1 )
print(F'The solutions are: {solutiona} and {solutiona}' )
if __name__ == "__main__":
main()
| 331 | 0 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCAmelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = ["input_features"]
def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=80 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16_000 , __SCREAMING_SNAKE_CASE : Optional[int]=160 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : Dict=400 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , __SCREAMING_SNAKE_CASE : Tuple=False , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(
feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = n_fft
__SCREAMING_SNAKE_CASE = hop_length
__SCREAMING_SNAKE_CASE = chunk_length
__SCREAMING_SNAKE_CASE = chunk_length * sampling_rate
__SCREAMING_SNAKE_CASE = self.n_samples // hop_length
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm="""slaney""" , mel_scale="""slaney""" , )
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = spectrogram(
__SCREAMING_SNAKE_CASE , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
__SCREAMING_SNAKE_CASE = log_spec[:, :-1]
__SCREAMING_SNAKE_CASE = np.maximum(__SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
__SCREAMING_SNAKE_CASE = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : List[np.ndarray] , __SCREAMING_SNAKE_CASE : List[np.ndarray] , __SCREAMING_SNAKE_CASE : float = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
__SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE , np.int32 )
__SCREAMING_SNAKE_CASE = []
for vector, length in zip(__SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
__SCREAMING_SNAKE_CASE = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
__SCREAMING_SNAKE_CASE = padding_value
normed_input_values.append(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "max_length" , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__SCREAMING_SNAKE_CASE = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
__SCREAMING_SNAKE_CASE = is_batched_numpy or (
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
__SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.float32 )
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
__SCREAMING_SNAKE_CASE = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T]
__SCREAMING_SNAKE_CASE = BatchFeature({"""input_features""": raw_speech} )
# convert into correct format for padding
__SCREAMING_SNAKE_CASE = self.pad(
__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
__SCREAMING_SNAKE_CASE = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
__SCREAMING_SNAKE_CASE = np.stack(padded_inputs["""input_features"""] , axis=0 )
# make sure list is in array format
__SCREAMING_SNAKE_CASE = padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 )
__SCREAMING_SNAKE_CASE = [self._np_extract_fbank_features(__SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.float32 ) for feature in input_features]
else:
__SCREAMING_SNAKE_CASE = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
__SCREAMING_SNAKE_CASE = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
__SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE )
return padded_inputs
def UpperCAmelCase__ ( self : List[Any] ) -> Dict[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
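# Hedged numeric sketch, standalone, of zero_mean_unit_var_norm above: after
# the transform, the valid region of each vector has mean ~0 and variance ~1.
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
print(round(float(normed.mean()), 6), round(float(normed.var()), 6))  # ~0.0 ~1.0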
| 361 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCAmelCase : Optional[int] = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "retribert"
def __init__( self : int , __SCREAMING_SNAKE_CASE : str=30_522 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Any=8 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : List[str]=3_072 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=1E-12 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=128 , __SCREAMING_SNAKE_CASE : Tuple=0 , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = share_encoders
__SCREAMING_SNAKE_CASE = projection_dim
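# Hedged usage sketch, assuming this config is exported as
# transformers.RetriBertConfig (the class above appears under a placeholder
# name): it instantiates like any PretrainedConfig.
from transformers import RetriBertConfig

config = RetriBertConfig(hidden_size=256, num_hidden_layers=4, projection_dim=64)
print(config.model_type, config.projection_dim)  # retribert 64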
| 331 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class lowerCAmelCase__ :
"""simple docstring"""
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None # sigma(t_i)
@classmethod
def UpperCAmelCase__ ( cls : Dict ) -> List[Any]:
"""simple docstring"""
return cls()
@dataclass
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
class lowerCAmelCase__ ( a , a ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return True
@register_to_config
def __init__( self : str , __SCREAMING_SNAKE_CASE : float = 0.02 , __SCREAMING_SNAKE_CASE : float = 100 , __SCREAMING_SNAKE_CASE : float = 1.007 , __SCREAMING_SNAKE_CASE : float = 80 , __SCREAMING_SNAKE_CASE : float = 0.05 , __SCREAMING_SNAKE_CASE : float = 50 , ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
return KarrasVeSchedulerState.create()
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : KarrasVeSchedulerState , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple = () ) -> KarrasVeSchedulerState:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = jnp.arange(0 , __SCREAMING_SNAKE_CASE )[::-1].copy()
__SCREAMING_SNAKE_CASE = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__SCREAMING_SNAKE_CASE , schedule=jnp.array(__SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) , timesteps=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : KarrasVeSchedulerState , __SCREAMING_SNAKE_CASE : jnp.ndarray , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
__SCREAMING_SNAKE_CASE = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
__SCREAMING_SNAKE_CASE = 0
# sample eps ~ N(0, S_noise^2 * I)
__SCREAMING_SNAKE_CASE = random.split(__SCREAMING_SNAKE_CASE , num=1 )
__SCREAMING_SNAKE_CASE = self.config.s_noise * random.normal(key=__SCREAMING_SNAKE_CASE , shape=sample.shape )
__SCREAMING_SNAKE_CASE = sigma + gamma * sigma
__SCREAMING_SNAKE_CASE = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : KarrasVeSchedulerState , __SCREAMING_SNAKE_CASE : jnp.ndarray , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : jnp.ndarray , __SCREAMING_SNAKE_CASE : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = sample_hat + sigma_hat * model_output
__SCREAMING_SNAKE_CASE = (sample_hat - pred_original_sample) / sigma_hat
__SCREAMING_SNAKE_CASE = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__SCREAMING_SNAKE_CASE , derivative=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : KarrasVeSchedulerState , __SCREAMING_SNAKE_CASE : jnp.ndarray , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : jnp.ndarray , __SCREAMING_SNAKE_CASE : jnp.ndarray , __SCREAMING_SNAKE_CASE : jnp.ndarray , __SCREAMING_SNAKE_CASE : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = sample_prev + sigma_prev * model_output
__SCREAMING_SNAKE_CASE = (sample_prev - pred_original_sample) / sigma_prev
__SCREAMING_SNAKE_CASE = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__SCREAMING_SNAKE_CASE , derivative=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : KarrasVeSchedulerState , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError()
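# Hedged numeric sketch of the sigma schedule built above: a geometric
# interpolation from sigma_max**2 down to sigma_min**2, recomputed here with
# the registered defaults (sigma_min=0.02, sigma_max=100) and a made-up step
# count of 5.
sigma_min, sigma_max, n = 0.02, 100.0, 5
schedule = [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (n - 1)) for i in range(n)]
print([round(s, 4) for s in schedule])  # [10000.0, 141.4214, 2.0, 0.0283, 0.0004]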
| 362 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( a , a , a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = AltDiffusionPipeline
lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
__SCREAMING_SNAKE_CASE = 77
__SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[str]:
"""simple docstring"""
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = text_encoder
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A photo of an astronaut"""
__SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = text_encoder
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""numpy""" )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 331 | 0 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase : int = logging.get_logger(__name__)
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "vision-encoder-decoder"
lowerCAmelCase__ = True
def __init__( self : Any , **__SCREAMING_SNAKE_CASE : Dict ) -> Any:
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'A configuration of type {self.model_type} cannot be instantiated because '
f'both `encoder` and `decoder` sub-configurations are required, but only {kwargs} was passed.' )
__SCREAMING_SNAKE_CASE = kwargs.pop("""encoder""" )
__SCREAMING_SNAKE_CASE = encoder_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE = kwargs.pop("""decoder""" )
__SCREAMING_SNAKE_CASE = decoder_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE = AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = True
@classmethod
def UpperCAmelCase__ ( cls : int , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : PretrainedConfig , **__SCREAMING_SNAKE_CASE : Tuple ) -> PretrainedConfig:
"""simple docstring"""
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.encoder.to_dict()
__SCREAMING_SNAKE_CASE = self.decoder.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = version.parse("1.11" )
@property
def UpperCAmelCase__ ( self : str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase__ ( self : List[str] ) -> float:
"""simple docstring"""
return 1E-4
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class lowerCAmelCase__ ( a ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = OrderedDict()
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizerBase" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
import torch
__SCREAMING_SNAKE_CASE = OrderedDict()
__SCREAMING_SNAKE_CASE = super().generate_dummy_inputs(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dummy_input["""input_ids"""].shape
__SCREAMING_SNAKE_CASE = (batch, encoder_sequence, self._config.encoder_hidden_size)
__SCREAMING_SNAKE_CASE = dummy_input.pop("""input_ids""" )
__SCREAMING_SNAKE_CASE = dummy_input.pop("""attention_mask""" )
__SCREAMING_SNAKE_CASE = torch.zeros(__SCREAMING_SNAKE_CASE )
return common_inputs
class lowerCAmelCase__ ( a ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self : str ) -> None:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : PretrainedConfig ) -> OnnxConfig:
"""simple docstring"""
return VisionEncoderDecoderEncoderOnnxConfig(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" ) -> OnnxConfig:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
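# Illustrative composition sketch (added; the class name is assumed from the public
# transformers API, not from this obfuscated file):
#   composite = VisionEncoderDecoderConfig.from_encoder_decoder_configs(vit_cfg, gpt2_cfg)
#   # the classmethod above sets is_decoder=True and add_cross_attention=True on the decoder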
| 363 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
UpperCAmelCase : Optional[int] = 'examples/'
UpperCAmelCase : List[str] = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
UpperCAmelCase : Union[str, Any] = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
UpperCAmelCase : Tuple = 'README.md'
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern]
__SCREAMING_SNAKE_CASE = replace.replace("""VERSION""" , a__ )
__SCREAMING_SNAKE_CASE = re_pattern.sub(a__ , a__ )
with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(a__ )
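# Illustrative call (added; the version string is a placeholder):
#   update_version_in_file("setup.py", "0.20.0", pattern="setup")
# rewrites the `version="...",` line in setup.py via REPLACE_PATTERNS["setup"].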
def a__ ( a__ ):
"""simple docstring"""
for folder, directories, fnames in os.walk(a__ ):
        # Prune folders with examples that are no longer actively maintained from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(a__ , a__ ) , a__ , pattern="""examples""" )
def a__ ( a__ , a__=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(a__ , a__ , a__ )
if not patch:
update_version_in_examples(a__ )
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """🤗 Transformers currently provides the following architectures"""
__SCREAMING_SNAKE_CASE = """1. Want to contribute a new model?"""
with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
# Find the start of the list.
__SCREAMING_SNAKE_CASE = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__SCREAMING_SNAKE_CASE = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__SCREAMING_SNAKE_CASE = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(a__ )
def a__ ( ):
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE = REPLACE_PATTERNS["""init"""][0].search(a__ ).groups()[0]
return packaging.version.parse(a__ )
def a__ ( a__=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__SCREAMING_SNAKE_CASE = default_version.base_version
elif patch:
__SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
__SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
__SCREAMING_SNAKE_CASE = input(F'Which version are you releasing? [{default_version}]' )
if len(a__ ) == 0:
__SCREAMING_SNAKE_CASE = default_version
print(F'Updating version to {version}.' )
global_version_update(a__ , patch=a__ )
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_version()
__SCREAMING_SNAKE_CASE = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
__SCREAMING_SNAKE_CASE = current_version.base_version
# Check with the user we got that right.
__SCREAMING_SNAKE_CASE = input(F'Which version are we developing now? [{dev_version}]' )
if len(a__ ) == 0:
__SCREAMING_SNAKE_CASE = dev_version
print(F'Updating version to {version}.' )
global_version_update(a__ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCAmelCase : Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 331 | 0 |
'''simple docstring'''
def a__ ( a__ = 10_00 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = -1
__SCREAMING_SNAKE_CASE = 0
for a in range(1 , n // 3 ):
        # Solve the system a**2 + b**2 == c**2 and a + b + c == n by eliminating c
__SCREAMING_SNAKE_CASE = (n * n - 2 * a * n) // (2 * n - 2 * a)
__SCREAMING_SNAKE_CASE = n - a - b
if c * c == (a * a + b * b):
__SCREAMING_SNAKE_CASE = a * b * c
if candidate >= product:
__SCREAMING_SNAKE_CASE = candidate
return product
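# Derivation note (added): substituting c = n - a - b into a**2 + b**2 == c**2
# yields b = n * (n - 2 * a) / (2 * (n - a)), the integer expression above; the
# explicit c * c == a * a + b * b check then filters out inexact divisions.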
if __name__ == "__main__":
print(f"""{solution() = }""")
| 364 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[str]=8 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=99 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Optional[int]=5 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=36 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Any=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : int=None , ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = 300
return config
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
        (
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
        ) = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = MraModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = MraForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = MraForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = MraForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
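        # Note (added): (batch, seq) inputs are expanded to (batch, num_choices, seq)
        # above because the multiple-choice head scores each (context, choice) pair
        # independently before the logits are compared across choices.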
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
            __SCREAMING_SNAKE_CASE,
        ) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = ()
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = MraModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""MRA does not output attentions""" )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
return
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
__SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
__SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = 50_265
__SCREAMING_SNAKE_CASE = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
__SCREAMING_SNAKE_CASE = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = 50_265
__SCREAMING_SNAKE_CASE = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 331 | 0 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
UpperCAmelCase : Optional[Any] = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : list = None ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
__SCREAMING_SNAKE_CASE = os.path.abspath("""examples""" )
for item in os.listdir(__SCREAMING_SNAKE_CASE ):
if item not in EXCLUDE_EXAMPLES:
__SCREAMING_SNAKE_CASE = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if os.path.isfile(__SCREAMING_SNAKE_CASE ) and ".py" in item_path:
with self.subTest(
tested_script=__SCREAMING_SNAKE_CASE , feature_script=__SCREAMING_SNAKE_CASE , tested_section="""main()""" if parser_only else """training_function()""" , ):
__SCREAMING_SNAKE_CASE = compare_against_test(
os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """\n""".join(__SCREAMING_SNAKE_CASE )
if special_strings is not None:
for string in special_strings:
__SCREAMING_SNAKE_CASE = diff.replace(__SCREAMING_SNAKE_CASE , """""" )
self.assertEqual(__SCREAMING_SNAKE_CASE , """""" )
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
self.one_complete_example("""complete_nlp_example.py""" , __SCREAMING_SNAKE_CASE )
self.one_complete_example("""complete_nlp_example.py""" , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
__SCREAMING_SNAKE_CASE = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.one_complete_example("""complete_cv_example.py""" , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = False
@classmethod
def UpperCAmelCase__ ( cls : List[str] ) -> str:
"""simple docstring"""
super().setUpClass()
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
__SCREAMING_SNAKE_CASE = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def UpperCAmelCase__ ( cls : Any ) -> Dict:
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
__SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def UpperCAmelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
__SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=__SCREAMING_SNAKE_CASE )
self.assertNotIn("""epoch 0:""" , __SCREAMING_SNAKE_CASE )
self.assertIn("""epoch 1:""" , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
__SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=__SCREAMING_SNAKE_CASE )
if torch.cuda.is_available():
__SCREAMING_SNAKE_CASE = torch.cuda.device_count()
else:
__SCREAMING_SNAKE_CASE = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , __SCREAMING_SNAKE_CASE )
self.assertIn("""epoch 1:""" , __SCREAMING_SNAKE_CASE )
else:
self.assertIn("""epoch 0:""" , __SCREAMING_SNAKE_CASE )
self.assertIn("""epoch 1:""" , __SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
__SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = re.findall("""({.+})""" , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [r for r in results if """accuracy""" in r][-1]
__SCREAMING_SNAKE_CASE = ast.literal_eval(__SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
__SCREAMING_SNAKE_CASE = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """tracking""" ) ) )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
| 365 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
lowerCAmelCase__ = 10000
lowerCAmelCase__ = None
lowerCAmelCase__ = None
class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCAmelCase__ = ParquetConfig
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
__SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ):
__SCREAMING_SNAKE_CASE = data_files
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
__SCREAMING_SNAKE_CASE = []
for split_name, files in data_files.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
            # Infer features if they are stored in the Arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(__SCREAMING_SNAKE_CASE ) )
break
splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={"""files""": files} ) )
return splits
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
            # A more expensive cast that supports nested features with keys in a different
            # order; it allows e.g. str <-> int/float casts, or casting str to Audio.
__SCREAMING_SNAKE_CASE = table_cast(__SCREAMING_SNAKE_CASE , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ):
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = pq.ParquetFile(__SCREAMING_SNAKE_CASE )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__SCREAMING_SNAKE_CASE = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'{file_idx}_{batch_idx}', self._cast_table(__SCREAMING_SNAKE_CASE )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}' )
raise
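        # Note (added): the yielded keys are "<file_idx>_<batch_idx>", which keeps
        # record batches from different parquet shards from colliding in the writer.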
| 331 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase : str = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "bert"
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int=30_522 , __SCREAMING_SNAKE_CASE : str=768 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=12 , __SCREAMING_SNAKE_CASE : Any=3_072 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : Tuple=1E-12 , __SCREAMING_SNAKE_CASE : int=0 , __SCREAMING_SNAKE_CASE : Any="absolute" , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Optional[Any]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
class lowerCAmelCase__ ( a ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
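    # For the default (non multiple-choice) task this resolves to, illustratively:
    #   OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
    #                ("attention_mask", {0: "batch", 1: "sequence"}),
    #                ("token_type_ids", {0: "batch", 1: "sequence"})])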
| 366 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
UpperCAmelCase : Any = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
UpperCAmelCase : Optional[Any] = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
UpperCAmelCase : Optional[int] = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase : List[str] = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase : List[Any] = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def a__ ( a__ , a__ ):
"""simple docstring"""
for tf_name, hf_name in patterns:
__SCREAMING_SNAKE_CASE = k.replace(a__ , a__ )
return k
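# Worked example (added): with DECODER_PATTERNS, the TF key
# "pegasus/decoder/layer_0/kernel" maps to "model.decoder.layers.0.weight"
# ('/' -> '.', 'layer_' -> 'layers.', 'kernel' -> 'weight', 'pegasus' -> 'model').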
def a__ ( a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = BigBirdPegasusConfig(**a__ )
__SCREAMING_SNAKE_CASE = BigBirdPegasusForConditionalGeneration(a__ )
__SCREAMING_SNAKE_CASE = torch_model.state_dict()
__SCREAMING_SNAKE_CASE = {}
# separating decoder weights
__SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
__SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
        __SCREAMING_SNAKE_CASE = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(a__ ):
continue
__SCREAMING_SNAKE_CASE = DECODER_PATTERNS
__SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ )
if new_k not in state_dict:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ["""dense""", """query""", """key""", """value"""] ):
__SCREAMING_SNAKE_CASE = v.T
__SCREAMING_SNAKE_CASE = torch.from_numpy(a__ )
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
        __SCREAMING_SNAKE_CASE = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(a__ ):
continue
__SCREAMING_SNAKE_CASE = REMAINING_PATTERNS
__SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ["""dense""", """query""", """key""", """value"""] ):
__SCREAMING_SNAKE_CASE = v.T
__SCREAMING_SNAKE_CASE = torch.from_numpy(a__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
__SCREAMING_SNAKE_CASE = mapping["""model.embed_positions.weight"""]
__SCREAMING_SNAKE_CASE = mapping.pop("""model.embed_positions.weight""" )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch_model.load_state_dict(a__ , strict=a__ )
__SCREAMING_SNAKE_CASE = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tf.train.list_variables(a__ )
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = ["""global_step"""]
for name, shape in tqdm(a__ , desc="""converting tf checkpoint to dict""" ):
__SCREAMING_SNAKE_CASE = any(pat in name for pat in ignore_name )
if skip_key:
continue
__SCREAMING_SNAKE_CASE = tf.train.load_variable(a__ , a__ )
__SCREAMING_SNAKE_CASE = array
return tf_weights
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_tf_weights_as_numpy(a__ )
__SCREAMING_SNAKE_CASE = convert_bigbird_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
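# Example invocation (added; both paths are placeholders):
#   python <this_script>.py --tf_ckpt_path /path/to/tf_ckpt --save_dir ./bigbird-pegasus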
if __name__ == "__main__":
UpperCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase : int = parser.parse_args()
UpperCAmelCase : Dict = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 331 | 0 |
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase : Any = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class lowerCAmelCase__ ( a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = XLMProphetNetTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE = XLMProphetNetTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """[PAD]"""
__SCREAMING_SNAKE_CASE = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """[PAD]""" )
self.assertEqual(vocab_keys[1] , """[CLS]""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 1_012 )
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = XLMProphetNetTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""[UNK]""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""[UNK]""",
""".""",
] , )
@cached_property
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" )
@slow
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """Hello World!"""
__SCREAMING_SNAKE_CASE = [35_389, 6_672, 49, 2]
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
| 367 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(a )
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Any=None ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = {}
if prompt is not None:
__SCREAMING_SNAKE_CASE = prompt
if generate_kwargs is not None:
__SCREAMING_SNAKE_CASE = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__SCREAMING_SNAKE_CASE = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
__SCREAMING_SNAKE_CASE = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE )
if prompt is not None:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise ValueError(
f'Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE )} - but expected a single string. '
"""Note also that one single text can be provided for conditional image to text generation.""" )
__SCREAMING_SNAKE_CASE = self.model.config.model_type
if model_type == "git":
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids
__SCREAMING_SNAKE_CASE = [self.tokenizer.cls_token_id] + input_ids
__SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(__SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'Model type {model_type} does not support conditional text generation' )
else:
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__SCREAMING_SNAKE_CASE = None
return model_inputs
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> List[str]:
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , __SCREAMING_SNAKE_CASE )
and all(x is None for x in model_inputs["""input_ids"""] )
):
__SCREAMING_SNAKE_CASE = None
if generate_kwargs is None:
__SCREAMING_SNAKE_CASE = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__SCREAMING_SNAKE_CASE = model_inputs.pop(self.model.main_input_name )
__SCREAMING_SNAKE_CASE = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return model_outputs
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
for output_ids in model_outputs:
__SCREAMING_SNAKE_CASE = {
"""generated_text""": self.tokenizer.decode(
__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , )
}
records.append(__SCREAMING_SNAKE_CASE )
return records
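A minimal usage sketch of the pipeline class above, as seen through the public `pipeline` factory. The checkpoint id is an illustrative choice, not the only option, and the input can be a local path, a URL, or a PIL image:

from transformers import pipeline

# Instantiate an image-to-text pipeline; the model below is one public
# captioning checkpoint, picked here purely for illustration.
captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")

# Returns the records built in postprocess above,
# e.g. [{"generated_text": "a cat sitting on a couch"}].
print(captioner("cat.png"))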
| 331 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Union[str, Any] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : int = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
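For reference, a short sketch of how this lazy plumbing behaves for an end user: top-level names resolve on first attribute access, and a missing optional backend only raises when the guarded object is actually touched.

# Sketch of the end-user view (assumes transformers is installed; whether
# XLNetModel resolves depends on torch being available in the environment).
import transformers

config = transformers.XLNetConfig()   # config needs no heavy backend
model_cls = transformers.XLNetModel   # resolved lazily on this access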
| 368 |
'''simple docstring'''
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = len(a__ )
while cur > 1:
# Find the maximum number in arr
__SCREAMING_SNAKE_CASE = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__SCREAMING_SNAKE_CASE = arr[mi::-1] + arr[mi + 1 : len(a__ )]
# Reverse whole list
__SCREAMING_SNAKE_CASE = arr[cur - 1 :: -1] + arr[cur : len(a__ )]
cur -= 1
return arr
if __name__ == "__main__":
UpperCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase : str = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
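The module above defines the sort as `a__` yet calls it as `pancake_sort`; for reference, a self-contained, runnable restatement of the same flip-based algorithm:

# Repeatedly flip the current maximum to the front, then flip it into its
# final slot at the end of the unsorted prefix.
def pancake_sort(arr: list) -> list:
    for size in range(len(arr), 1, -1):
        mi = arr.index(max(arr[:size]))          # position of the current max
        arr = arr[mi::-1] + arr[mi + 1 :]        # flip the max to the front
        arr = arr[size - 1 :: -1] + arr[size:]   # flip it into place
    return arr

assert pancake_sort([3, 1, 2]) == [1, 2, 3]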
| 331 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=a )
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowerCAmelCase__ = Features({"text": Value("string" )} )
lowerCAmelCase__ = Features({"labels": ClassLabel} )
lowerCAmelCase__ = "text"
lowerCAmelCase__ = "labels"
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
__SCREAMING_SNAKE_CASE = copy.deepcopy(self )
__SCREAMING_SNAKE_CASE = self.label_schema.copy()
__SCREAMING_SNAKE_CASE = features[self.label_column]
__SCREAMING_SNAKE_CASE = label_schema
return task_template
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict[str, str]:
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
}
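A sketch of how this template is used in practice, assuming a `datasets` release that still ships task templates (they were deprecated in later versions). The column names are illustrative:

from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
template = TextClassification(text_column="text", label_column="labels")
# align_with_features is the (obfuscated) method defined above: it validates
# the columns and copies the dataset's real ClassLabel into the schema.
aligned = template.align_with_features(features)
print(aligned.label_schema)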
| 369 |
'''simple docstring'''
import os
# Precompute the first 100 triangular numbers
UpperCAmelCase : int = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.path.dirname(os.path.realpath(a__ ) )
__SCREAMING_SNAKE_CASE = os.path.join(a__ , """words.txt""" )
__SCREAMING_SNAKE_CASE = """"""
with open(a__ ) as f:
__SCREAMING_SNAKE_CASE = f.readline()
__SCREAMING_SNAKE_CASE = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
__SCREAMING_SNAKE_CASE = [
word
for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(a__ )
if __name__ == "__main__":
print(solution())
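A quick worked check of the scoring rule used in the comprehension above:

# "SKY" -> 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the 10th triangular
# number, so "SKY" counts as a triangle word.
value = sum(ord(ch) - 64 for ch in "SKY")
assert value == 55 and value in TRIANGULAR_NUMBERS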
| 331 | 0 |
'''simple docstring'''
from manim import *
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
__SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__SCREAMING_SNAKE_CASE = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__SCREAMING_SNAKE_CASE = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__SCREAMING_SNAKE_CASE = Text("""CPU""" , font_size=24 )
__SCREAMING_SNAKE_CASE = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
__SCREAMING_SNAKE_CASE = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__SCREAMING_SNAKE_CASE = Text("""GPU""" , font_size=24 )
__SCREAMING_SNAKE_CASE = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__SCREAMING_SNAKE_CASE = Text("""Model""" , font_size=24 )
__SCREAMING_SNAKE_CASE = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
rect.set_stroke(__SCREAMING_SNAKE_CASE )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__SCREAMING_SNAKE_CASE , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__SCREAMING_SNAKE_CASE )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__SCREAMING_SNAKE_CASE , buff=0.0 )
self.add(__SCREAMING_SNAKE_CASE )
cpu_targs.append(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
__SCREAMING_SNAKE_CASE = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
__SCREAMING_SNAKE_CASE = Text("""Loaded Checkpoint""" , font_size=24 )
__SCREAMING_SNAKE_CASE = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , aligned_edge=__SCREAMING_SNAKE_CASE , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__SCREAMING_SNAKE_CASE = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__SCREAMING_SNAKE_CASE = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE ) , Write(__SCREAMING_SNAKE_CASE ) )
self.play(Write(__SCREAMING_SNAKE_CASE , run_time=1 ) , Create(__SCREAMING_SNAKE_CASE , run_time=1 ) )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = fill.copy().set_fill(__SCREAMING_SNAKE_CASE , opacity=0.7 )
target.move_to(__SCREAMING_SNAKE_CASE )
first_animations.append(GrowFromCenter(__SCREAMING_SNAKE_CASE , run_time=1 ) )
__SCREAMING_SNAKE_CASE = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*__SCREAMING_SNAKE_CASE )
self.play(*__SCREAMING_SNAKE_CASE )
self.wait()
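To render the scene above with Manim Community, an invocation along these lines works; the file and scene names are placeholders, since the class name here is obfuscated:

# manim -pql checkpoint_animation.py Stage2   # -p: preview, -ql: low quality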
| 370 |
'''simple docstring'''
class lowerCAmelCase__ : # Public class to implement a graph
"""simple docstring"""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = row
__SCREAMING_SNAKE_CASE = col
__SCREAMING_SNAKE_CASE = graph
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> bool:
"""simple docstring"""
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__SCREAMING_SNAKE_CASE = [-1, 0, 1, -1, 1, -1, 0, 1]
__SCREAMING_SNAKE_CASE = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __SCREAMING_SNAKE_CASE ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> int: # And finally, count all islands.
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[False for j in range(self.COL )] for i in range(self.ROW )]
__SCREAMING_SNAKE_CASE = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
count += 1
return count
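An independent, runnable sketch of the same 8-neighbour DFS island count; the names are illustrative, but the traversal mirrors the class above:

def count_islands(grid: list) -> int:
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]

    def dfs(i: int, j: int) -> None:
        # stop at the border, at water, or at already-visited cells
        if not (0 <= i < rows and 0 <= j < cols) or seen[i][j] or not grid[i][j]:
            return
        seen[i][j] = True
        for di in (-1, 0, 1):              # all 8 neighbours
            for dj in (-1, 0, 1):
                if di or dj:
                    dfs(i + di, j + dj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not seen[i][j]:
                dfs(i, j)                  # flood one island
                count += 1
    return count

assert count_islands([[1, 0, 0], [0, 0, 1], [1, 0, 1]]) == 3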
| 331 | 0 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
UpperCAmelCase : str = 'examples/'
UpperCAmelCase : Any = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
UpperCAmelCase : Dict = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
UpperCAmelCase : Dict = 'README.md'
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern]
__SCREAMING_SNAKE_CASE = replace.replace("""VERSION""" , a__ )
__SCREAMING_SNAKE_CASE = re_pattern.sub(a__ , a__ )
with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(a__ )
def a__ ( a__ ):
"""simple docstring"""
for folder, directories, fnames in os.walk(a__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(a__ , a__ ) , a__ , pattern="""examples""" )
def a__ ( a__ , a__=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(a__ , a__ , a__ )
if not patch:
update_version_in_examples(a__ )
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """🤗 Transformers currently provides the following architectures"""
__SCREAMING_SNAKE_CASE = """1. Want to contribute a new model?"""
with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
# Find the start of the list.
__SCREAMING_SNAKE_CASE = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__SCREAMING_SNAKE_CASE = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__SCREAMING_SNAKE_CASE = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(a__ )
def a__ ( ):
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE = REPLACE_PATTERNS["""init"""][0].search(a__ ).groups()[0]
return packaging.version.parse(a__ )
def a__ ( a__=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__SCREAMING_SNAKE_CASE = default_version.base_version
elif patch:
__SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
__SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
__SCREAMING_SNAKE_CASE = input(F'Which version are you releasing? [{default_version}]' )
if len(a__ ) == 0:
__SCREAMING_SNAKE_CASE = default_version
print(F'Updating version to {version}.' )
global_version_update(a__ , patch=a__ )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_version()
__SCREAMING_SNAKE_CASE = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
__SCREAMING_SNAKE_CASE = current_version.base_version
# Check with the user we got that right.
__SCREAMING_SNAKE_CASE = input(F'Which version are we developing now? [{dev_version}]' )
if len(a__ ) == 0:
__SCREAMING_SNAKE_CASE = dev_version
print(F'Updating version to {version}.' )
global_version_update(a__ )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCAmelCase : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
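A quick demonstration of what one REPLACE_PATTERNS entry does, here the 'init' pattern applied to a fabricated source line:

import re

re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
replacement = '__version__ = "VERSION"'.replace("VERSION", "4.31.0")
print(re_pattern.sub(replacement, '__version__ = "4.31.0.dev0"'))
# prints: __version__ = "4.31.0"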
| 371 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=99 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : Dict=5 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Tuple=37 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=4 , ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_attention_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_choices
def UpperCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
__SCREAMING_SNAKE_CASE = jnp.array([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = 50_000
__SCREAMING_SNAKE_CASE = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 331 | 0 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCAmelCase : Optional[Any] = logging.getLogger(__name__)
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any=None ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
__SCREAMING_SNAKE_CASE , question_encoder_tokenizer=__SCREAMING_SNAKE_CASE , generator_tokenizer=__SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , init_retrieval=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = None
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : int ) -> Tuple:
"""simple docstring"""
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
__SCREAMING_SNAKE_CASE = self._infer_socket_ifname()
# avoid clash with the NCCL port
__SCREAMING_SNAKE_CASE = str(distributed_port + 1 )
__SCREAMING_SNAKE_CASE = dist.new_group(ranks=__SCREAMING_SNAKE_CASE , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
return dist.get_rank(group=self.process_group ) == 0
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=torch.floataa ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = torch.empty(__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )
dist.scatter(__SCREAMING_SNAKE_CASE , src=0 , scatter_list=__SCREAMING_SNAKE_CASE , group=self.process_group )
return target_tensor
def UpperCAmelCase__ ( self : Any ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__SCREAMING_SNAKE_CASE = next((addr for addr in addrs if addr.startswith("""e""" )) , __SCREAMING_SNAKE_CASE )
return ifname
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : int ) -> Tuple[np.ndarray, List[dict]]:
"""simple docstring"""
if not dist.is_initialized():
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._main_retrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__SCREAMING_SNAKE_CASE )
# distributed training
__SCREAMING_SNAKE_CASE = dist.get_world_size(group=self.process_group )
# gather logic
__SCREAMING_SNAKE_CASE = None
if self._is_main():
__SCREAMING_SNAKE_CASE = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__SCREAMING_SNAKE_CASE )]
dist.gather(torch.tensor(__SCREAMING_SNAKE_CASE ) , dst=0 , gather_list=__SCREAMING_SNAKE_CASE , group=self.process_group )
# scatter logic
__SCREAMING_SNAKE_CASE = question_hidden_states.shape[0]
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
if self._is_main():
assert len(__SCREAMING_SNAKE_CASE ) == world_size
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._main_retrieve(torch.cat(__SCREAMING_SNAKE_CASE ).numpy() , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ), torch.tensor(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self._chunk_tensor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self._chunk_tensor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self._scattered(__SCREAMING_SNAKE_CASE , [n_queries, n_docs] , target_type=torch.intaa )
__SCREAMING_SNAKE_CASE = self._scattered(__SCREAMING_SNAKE_CASE , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__SCREAMING_SNAKE_CASE )
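A standalone sketch of the per-worker chunking this retrieve method relies on; no process group is needed to see the idea, and chunk_per_worker is an illustrative stand-in for the internal _chunk_tensor call above:

import torch

def chunk_per_worker(t: torch.Tensor, world_size: int):
    # one chunk per worker along the batch dimension
    return list(torch.chunk(t, world_size, dim=0))

doc_ids = torch.arange(8).reshape(4, 2)   # 4 queries gathered from 2 workers
print(chunk_per_worker(doc_ids, 2))       # two (2, 2) chunks to scatter back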
| 350 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "markuplm"
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple=30_522 , __SCREAMING_SNAKE_CASE : Optional[Any]=768 , __SCREAMING_SNAKE_CASE : str=12 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : str=3_072 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1E-12 , __SCREAMING_SNAKE_CASE : str=0 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=256 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_024 , __SCREAMING_SNAKE_CASE : Dict=216 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_001 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : str=50 , __SCREAMING_SNAKE_CASE : int="absolute" , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : int=None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Tuple:
"""simple docstring"""
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
# additional properties
__SCREAMING_SNAKE_CASE = max_depth
__SCREAMING_SNAKE_CASE = max_xpath_tag_unit_embeddings
__SCREAMING_SNAKE_CASE = max_xpath_subs_unit_embeddings
__SCREAMING_SNAKE_CASE = tag_pad_id
__SCREAMING_SNAKE_CASE = subs_pad_id
__SCREAMING_SNAKE_CASE = xpath_unit_hidden_size
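A minimal instantiation sketch of the config above; the values printed are the defaults from the signature:

from transformers import MarkupLMConfig

config = MarkupLMConfig()
print(config.model_type)  # "markuplm"
print(config.max_depth)   # 50, the maximum XPath depth the model embeds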
| 331 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Tuple = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
UpperCAmelCase : Optional[Any] = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = torch.load(a__ , map_location="""cpu""" )
return sd
def a__ ( a__ , a__ , a__=rename_keys_prefix ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = OrderedDict()
__SCREAMING_SNAKE_CASE = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
__SCREAMING_SNAKE_CASE = key
for name_pair in rename_keys_prefix:
__SCREAMING_SNAKE_CASE = new_key.replace(name_pair[0] , name_pair[1] )
__SCREAMING_SNAKE_CASE = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`; it was added separately
__SCREAMING_SNAKE_CASE = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def a__ ( a__ , a__ ):
"""simple docstring"""
assert (
checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
__SCREAMING_SNAKE_CASE = """pretraining"""
if "vcr" in checkpoint_path:
__SCREAMING_SNAKE_CASE = {"""visual_embedding_dim""": 5_12}
elif "vqa_advanced" in checkpoint_path:
__SCREAMING_SNAKE_CASE = {"""visual_embedding_dim""": 20_48}
elif "vqa" in checkpoint_path:
__SCREAMING_SNAKE_CASE = {"""visual_embedding_dim""": 20_48}
elif "nlvr" in checkpoint_path:
__SCREAMING_SNAKE_CASE = {"""visual_embedding_dim""": 10_24}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
__SCREAMING_SNAKE_CASE = {"""visual_embedding_dim""": 5_12}
__SCREAMING_SNAKE_CASE = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
__SCREAMING_SNAKE_CASE = {"""visual_embedding_dim""": 20_48}
__SCREAMING_SNAKE_CASE = """vqa_advanced"""
elif "vqa" in checkpoint_path:
__SCREAMING_SNAKE_CASE = {"""visual_embedding_dim""": 20_48, """num_labels""": 31_29}
__SCREAMING_SNAKE_CASE = """vqa"""
elif "nlvr" in checkpoint_path:
__SCREAMING_SNAKE_CASE = {
"""visual_embedding_dim""": 10_24,
"""num_labels""": 2,
}
__SCREAMING_SNAKE_CASE = """nlvr"""
__SCREAMING_SNAKE_CASE = VisualBertConfig(**a__ )
# Load State Dict
__SCREAMING_SNAKE_CASE = load_state_dict(a__ )
__SCREAMING_SNAKE_CASE = get_new_dict(a__ , a__ )
if model_type == "pretraining":
__SCREAMING_SNAKE_CASE = VisualBertForPreTraining(a__ )
elif model_type == "vqa":
__SCREAMING_SNAKE_CASE = VisualBertForQuestionAnswering(a__ )
elif model_type == "nlvr":
__SCREAMING_SNAKE_CASE = VisualBertForVisualReasoning(a__ )
elif model_type == "multichoice":
__SCREAMING_SNAKE_CASE = VisualBertForMultipleChoice(a__ )
model.load_state_dict(a__ )
# Save Checkpoints
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
UpperCAmelCase : Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
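An invocation sketch for the converter above; the script filename and output path are placeholders, and the checkpoint name must be one of ACCEPTABLE_CHECKPOINTS:

# python convert_visual_bert.py vcr_fine_tune.th ./visual-bert-vcr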
| 351 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Tuple = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 331 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCAmelCase : Union[str, Any] = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
UpperCAmelCase : Optional[Any] = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
UpperCAmelCase : Any = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 0.0
for i, j in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
n_correct += 1.0 if math_equivalence.is_equiv(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else 0.0
__SCREAMING_SNAKE_CASE = n_correct / len(__SCREAMING_SNAKE_CASE )
return {
"accuracy": accuracy,
}
| 352 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2, 4], [1, 2, 3, 4]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(3 )
__SCREAMING_SNAKE_CASE = stepped is True and completed is True and reset is False
self.assertTrue(__SCREAMING_SNAKE_CASE )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
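A standalone sketch of the constraint state machine these tests exercise: feed token ids one at a time and watch one branch complete.

from transformers.generation import DisjunctiveConstraint

constraint = DisjunctiveConstraint([[5, 6], [5, 7, 8]])
for token in (5, 7, 8):
    stepped, completed, reset = constraint.update(token)
print(constraint.completed)  # True: the branch [5, 7, 8] was matched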
| 331 | 0 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = torch.nn.Linear(10 , 10 )
__SCREAMING_SNAKE_CASE = torch.optim.SGD(model.parameters() , 0.1 )
__SCREAMING_SNAKE_CASE = Accelerator()
__SCREAMING_SNAKE_CASE = accelerator.prepare(__SCREAMING_SNAKE_CASE )
try:
pickle.loads(pickle.dumps(__SCREAMING_SNAKE_CASE ) )
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 353 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Any=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : Optional[int]=37 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[str]="None" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = relative_attention
__SCREAMING_SNAKE_CASE = position_biased_input
__SCREAMING_SNAKE_CASE = pos_att_type
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = 300
return config
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DebertaForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DebertaForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
        model = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
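# --- Added illustration (not part of the original test file) ---
# A minimal, hedged usage sketch mirroring the integration test above: it assumes
# network access to the "microsoft/deberta-base" checkpoint and only checks the
# hidden-state shape instead of exact values (deberta-base uses hidden size 768).
def _deberta_usage_sketch():
    model = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
    input_ids = torch.tensor([[0, 31_414, 232, 2]] )
    with torch.no_grad():
        hidden_states = model(input_ids )[0]
    assert hidden_states.shape == (1, 4, 768)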
| 331 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
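# --- Added illustration (not part of the original test file) ---
# Hedged sketch of the behaviour the tests above exercise: a DisjunctiveConstraint is
# fulfilled once any single branch is generated in full, and `update` reports
# (stepped, completed, reset) after each token.
def _disjunctive_constraint_sketch():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]] )
    for token in (1, 2, 4):
        stepped , completed , reset = dc.update(token )
    assert dc.completed and dc.current_seq == [1, 2, 4]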
| 354 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text ):
    """simple docstring"""
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(""" """ + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F'{round(-1 * my_fir_sum ):.1f}' )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F'{round(-1 * my_sec_sum ):.1f}' )
    # print the difference between them
    print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def analyze_text( text ):
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main():
    """simple docstring"""
    import doctest
    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
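# --- Added worked example (illustrative, not in the original script) ---
# For the text "aab", analyze_text counts {'a': 2, 'b': 1}, so the first-order entropy
# is -(2/3 * log2(2/3) + 1/3 * log2(1/3)) ~= 0.92 bits; calculate_prob rounds before
# formatting and therefore prints 1.0 for this input.
def _entropy_sketch():
    single_char_strings , two_char_strings = analyze_text("""aab""" )
    assert single_char_strings["a"] == 2 and single_char_strings["b"] == 1
    calculate_prob("""aab""" )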
| 331 | 0 |
'''simple docstring'''
class lowerCAmelCase__ :
"""simple docstring"""
    def __init__( self : List[Any] , set_counts : list ) -> None:
        """simple docstring"""
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge( self : int , src : int , dst : int ) -> bool:
        """simple docstring"""
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True
    def get_parent( self : Union[str, Any] , disj_set : int ) -> int:
        """simple docstring"""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
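# --- Added usage sketch (illustrative, not in the original file) ---
# Three singleton sets of size 1; after merging 0-1 and then 1-2, every element shares
# one root and the largest set has size 3. `lowerCAmelCase__` is the disjoint-set
# class defined above.
def _disjoint_set_sketch():
    ds = lowerCAmelCase__([1, 1, 1] )
    ds.merge(0 , 1 )
    ds.merge(1 , 2 )
    assert ds.get_parent(0 ) == ds.get_parent(2 )
    assert ds.max_set == 3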
| 355 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def a__ ( a__ ):
"""simple docstring"""
return x + 2
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
        code = """x = 3"""
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"""x""": 3} )
        code = """x = y"""
        state = {"""y""": 5}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"""x""": 5, """y""": 5} )
    def UpperCAmelCase__ ( self : Optional[int] ) -> int:
        """simple docstring"""
        code = """y = add_two(x)"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"""x""": 3, """y""": 5} )
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code , {} , state=state )
        assert result is None
        assert "tried to execute add_two" in out.out
    def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
        """simple docstring"""
        code = """x = 3"""
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"""x""": 3} )
    def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
        """simple docstring"""
        code = """test_dict = {'x': x, 'y': add_two(x)}"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        self.assertDictEqual(result , {"""x""": 3, """y""": 5} )
        self.assertDictEqual(state , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
    def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
        """simple docstring"""
        code = """x = 3\ny = 5"""
        state = {}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"""x""": 3, """y""": 5} )
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
        """simple docstring"""
        code = """text = f'This is x: {x}.'"""
        state = {"""x""": 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state , {"""x""": 3, """text""": """This is x: 3."""} )
    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        """simple docstring"""
        code = """if x <= 3:\n    y = 2\nelse:\n    y = 5"""
        state = {"""x""": 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state , {"""x""": 3, """y""": 2} )
        state = {"""x""": 8}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"""x""": 8, """y""": 5} )
    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        """simple docstring"""
        code = """test_list = [x, add_two(x)]"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        self.assertListEqual(result , [3, 5] )
        self.assertDictEqual(state , {"""x""": 3, """test_list""": [3, 5]} )
    def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
        """simple docstring"""
        code = """y = x"""
        state = {"""x""": 3}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"""x""": 3, """y""": 3} )
    def UpperCAmelCase__ ( self : Any ) -> Tuple:
        """simple docstring"""
        code = """test_list = [x, add_two(x)]\ntest_list[1]"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"""x""": 3, """test_list""": [3, 5]} )
        code = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
        state = {"""x""": 3}
        result = evaluate(code , {"""add_two""": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} )
    def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        code = """x = 0\nfor i in range(3):\n    x = i"""
        state = {}
        result = evaluate(code , {"""range""": range} , state=state )
        assert result == 2
        self.assertDictEqual(state , {"""x""": 2, """i""": 2} )
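# --- Added illustration (not part of the original tests) ---
# Hedged sketch of the contract the tests above establish: `evaluate` interprets a
# small Python snippet against an explicit tool namespace, mutates `state` in place,
# and returns the value of the last statement.
def _evaluate_sketch():
    state = {"""x""": 3}
    result = evaluate("""y = add_two(x)\ny + 1""" , {"""add_two""": add_two} , state=state )
    assert result == 6 and state["""y"""] == 5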
| 331 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase : Tuple = pytest.mark.integration
@require_faiss
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
        dset = Dataset.from_dict({"""filename""": ["""my_name-train""" + """_""" + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
import faiss
        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index("""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
        dset.drop_index("""vecs""" )
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
import faiss
        dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            dset.save_faiss_index("""vecs""" , tmp_file.name )
            dset.load_faiss_index("""vecs2""" , tmp_file.name )
        os.unlink(tmp_file.name )
        scores , examples = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" )
        dset.drop_index("""vecs""" )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.float32 ) ) )
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
from elasticsearch import Elasticsearch
        dset = self._create_dummy_dataset()
        with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
            """elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
            mocked_index_create.return_value = {"""acknowledged""": True}
            mocked_bulk.return_value([(True, None)] * 30 )
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("""filename""" , es_client=es_client )
            scores , examples = dset.get_nearest_examples("""filename""" , """my_name-train_29""" )
            self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
@require_faiss
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores , total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
import faiss
        index = FaissIndex(string_factory="""Flat""" )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory="""LSH""" )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            index = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs ):
    """simple docstring"""
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = """index.faiss"""
    path = F'mock://{index_name}'
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"""acknowledged""": True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(["""foo""", """bar""", """foobar"""] )
            # single query
            query = """foo"""
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
            scores , indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = """foo"""
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
            scores , indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ["""foo""", """bar""", """foobar"""]
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
            total_scores , total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ["""foo""", """bar""", """foobar"""]
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
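# --- Added illustration (assumes faiss is installed; not from the original file) ---
# Build a flat inner-product index over one-hot vectors and retrieve the row matching
# a one-hot query, mirroring the FaissIndex tests above.
def _faiss_sketch():
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    scores , indices = index.search(np.eye(5 , dtype=np.float32 )[2] )
    assert indices[0] == 2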
| 356 |
'''simple docstring'''
import os
def solution( filename = "input.txt" ):
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(""",""" )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""")
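# --- Added worked mini-example (illustrative, not in the original solution) ---
# The same three-sweep DP on an in-memory matrix: for [[1, 9], [5, 1]] the cheapest
# left-to-right path (moving right, up, or down) is 5 -> 1, so the answer is 6.
def _min_path_sum(matrix ):
    rows , cols = len(matrix ), len(matrix[0] )
    sums = [[matrix[i][0]] + [-1] * (cols - 1) for i in range(rows )]
    for j in range(1 , cols ):
        for i in range(rows ):
            sums[i][j] = sums[i][j - 1] + matrix[i][j]
        for i in range(1 , rows ):
            sums[i][j] = min(sums[i][j] , sums[i - 1][j] + matrix[i][j] )
        for i in range(rows - 2 , -1 , -1 ):
            sums[i][j] = min(sums[i][j] , sums[i + 1][j] + matrix[i][j] )
    return min(row[-1] for row in sums )
assert _min_path_sum([[1, 9], [5, 1]] ) == 6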
| 331 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
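# --- Added usage note (illustrative; checkpoint names are assumptions) ---
# With the `_LazyModule` pattern above, the heavy frameworks are only imported on
# first attribute access, e.g.:
#   from transformers import VisionEncoderDecoderModel
#   model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
#       "google/vit-base-patch16-224-in21k", "bert-base-uncased"
#   )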
| 357 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class lowerCAmelCase__ ( pl.LightningModule ):
"""simple docstring"""
    def __init__( self : Optional[int] , hparams : argparse.Namespace , num_labels : Optional[Any]=None , mode : Dict="base" , config : Dict=None , tokenizer : str=None , model : List[str]=None , **config_kwargs : Union[str, Any] , ) -> Any:
        """simple docstring"""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams )
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir )
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=cache_dir , **config_kwargs , )
        else:
            self.config = config
        extra_model_params = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
        for p in extra_model_params:
            if getattr(self.hparams , p , None ):
                assert hasattr(self.config , p ), f'model config doesn\'t have a `{p}` attribute'
                setattr(self.config , p , getattr(self.hparams , p ) )
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=cache_dir , )
        else:
            self.tokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=cache_dir , )
        else:
            self.model = model
    def load_hf_checkpoint( self : List[str] , *args : List[Any] , **kwargs : List[Any] ) -> Dict:
        """simple docstring"""
        self.model = self.model_type.from_pretrained(*args , **kwargs )
    def get_lr_scheduler( self : List[Any] ) -> int:
        """simple docstring"""
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        scheduler = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
        return scheduler
    def configure_optimizers( self : int ) -> Optional[int]:
        """simple docstring"""
        model = self.model
        no_decay = ["""bias""", """LayerNorm.weight"""]
        optimizer_grouped_parameters = [
            {
                """params""": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check this named paramters
                """weight_decay""": self.hparams.weight_decay,
            },
            {
                """params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                """weight_decay""": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , scale_parameter=False , relative_step=False )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step( self : Any , batch : Optional[int] , batch_nb : Union[str, Any] ) -> int:
        """simple docstring"""
        return self.validation_step(batch , batch_nb )
    def test_epoch_end( self : int , outputs : Union[str, Any] ) -> Dict:
        """simple docstring"""
        return self.validation_end(outputs )
    def total_steps( self : Tuple ) -> int:
        """simple docstring"""
        num_devices = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup( self : Any , stage : int ) -> Union[str, Any]:
        """simple docstring"""
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset )
        else:
            self.train_loader = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=True )
            self.dataset_size = len(self.train_dataloader().dataset )
    def get_dataloader( self : List[str] , type_path : str , batch_size : int , shuffle : bool = False ) -> int:
        """simple docstring"""
        raise NotImplementedError("""You must implement this for your task""" )
    def train_dataloader( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        return self.train_loader
    def val_dataloader( self : str ) -> Optional[Any]:
        """simple docstring"""
        return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=False )
    def test_dataloader( self : str ) -> Any:
        """simple docstring"""
        return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=False )
    def _feature_file( self : str , mode : Dict ) -> Union[str, Any]:
        """simple docstring"""
        return os.path.join(
            self.hparams.data_dir , """cached_{}_{}_{}""".format(
                mode , list(filter(None , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint( self : List[str] , checkpoint : Dict[str, Any] ) -> None:
        """simple docstring"""
        save_path = self.output_dir.joinpath("""best_tfmr""" )
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path )
        self.tokenizer.save_pretrained(save_path )
@staticmethod
    def add_model_specific_args( parser : Optional[Any] , root_dir : Any ) -> int:
        """simple docstring"""
        parser.add_argument(
            """--model_name_or_path""" , default=None , type=str , required=True , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
        parser.add_argument(
            """--config_name""" , default="""""" , type=str , help="""Pretrained config name or path if not the same as model_name""" )
        parser.add_argument(
            """--tokenizer_name""" , default=None , type=str , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
        parser.add_argument(
            """--cache_dir""" , default=str(Path(root_dir ).parent / """test_run""" / """cache""" ) , type=str , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
        parser.add_argument(
            """--encoder_layerdrop""" , type=float , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument(
            """--decoder_layerdrop""" , type=float , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument(
            """--dropout""" , type=float , help="""Dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument(
            """--attention_dropout""" , type=float , help="""Attention dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument("""--learning_rate""" , default=5E-5 , type=float , help="""The initial learning rate for Adam.""" )
        parser.add_argument(
            """--lr_scheduler""" , default="""linear""" , choices=arg_to_scheduler_choices , metavar=arg_to_scheduler_metavar , type=str , help="""Learning rate scheduler""" , )
        parser.add_argument("""--weight_decay""" , default=0.0 , type=float , help="""Weight decay if we apply some.""" )
        parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=float , help="""Epsilon for Adam optimizer.""" )
        parser.add_argument("""--warmup_steps""" , default=0 , type=int , help="""Linear warmup over warmup_steps.""" )
        parser.add_argument("""--num_workers""" , default=4 , type=int , help="""kwarg passed to DataLoader""" )
        parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=int )
        parser.add_argument("""--train_batch_size""" , default=32 , type=int )
        parser.add_argument("""--eval_batch_size""" , default=32 , type=int )
        parser.add_argument("""--adafactor""" , action="""store_true""" )
class InitCallback( pl.Callback ):
    """simple docstring"""
    def on_sanity_check_start( self : Tuple , trainer : Any , pl_module : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback( pl.Callback ):
    """simple docstring"""
    def on_after_backward( self : Dict , trainer : Union[str, Any] , pl_module : Union[str, Any] ) -> Any:
        """simple docstring"""
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )
class LoggingCallback( pl.Callback ):
    """simple docstring"""
    def on_batch_end( self : Optional[int] , trainer : Tuple , pl_module : str ) -> List[str]:
        """simple docstring"""
        lr_scheduler = trainer.lr_schedulers[0]["""scheduler"""]
        lrs = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )
    def on_validation_end( self : Optional[int] , trainer : pl.Trainer , pl_module : pl.LightningModule ) -> List[Any]:
        """simple docstring"""
        rank_zero_info("""***** Validation results *****""" )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("""{} = {}\n""".format(key , str(metrics[key] ) ) )
    def on_test_end( self : Tuple , trainer : pl.Trainer , pl_module : pl.LightningModule ) -> str:
        """simple docstring"""
        rank_zero_info("""***** Test results *****""" )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
        with open(output_test_results_file , """w""" ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("""{} = {}\n""".format(key , str(metrics[key] ) ) )
                    writer.write("""{} = {}\n""".format(key , str(metrics[key] ) ) )
def add_generic_args(parser , root_dir ):
    """simple docstring"""
    parser.add_argument(
        """--output_dir""" , default=str(Path(root_dir ).parent / """test_run""" / """model_checkpoints""" ) , type=str , help="""The output directory where the model predictions and checkpoints will be written.""" , )
    parser.add_argument(
        """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
    parser.add_argument(
        """--fp16_opt_level""" , type=str , default="""O2""" , help=(
            """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
            """See details at https://nvidia.github.io/apex/amp.html"""
        ) , )
    parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=int )
    parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=float , help="""Max gradient norm""" )
    parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
    parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
    parser.add_argument(
        """--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=int , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
    parser.add_argument("""--seed""" , type=int , default=42 , help="""random seed for initialization""" )
    parser.add_argument(
        """--data_dir""" , default=str(Path(root_dir ).parent / """test_run""" / """dummy-train-data""" ) , type=str , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def generic_train(model , args , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs , ):
    """simple docstring"""
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params["""precision"""] = 16
    if args.gpus > 1:
        train_params["""accelerator"""] = """auto"""
        train_params["""strategy"""] = """ddp"""
    train_params["""accumulate_grad_batches"""] = args.accumulate_grad_batches
    train_params["""profiler"""] = None
    train_params["""devices"""] = """auto"""
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print("""RAG modeling tests with new set functions successfully executed!""" )
    return trainer
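# --- Added end-to-end sketch (hypothetical task module; not part of the original file) ---
# A task script would subclass the LightningModule above, register generic and
# model-specific arguments, then hand everything to `generic_train`:
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   MyTaskModule.add_model_specific_args(parser, os.getcwd())  # MyTaskModule is hypothetical
#   args = parser.parse_args()
#   generic_train(MyTaskModule(args), args)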
| 331 | 0 |
import os
def solution():
    """simple docstring"""
    file_path = os.path.join(os.path.dirname(__file__ ) , """num.txt""" )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
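# --- Added note (illustrative) ---
# Python integers have arbitrary precision, so summing the 50-digit numbers directly
# is exact; e.g. str(sum([123, 456, 789]))[:2] == "13".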
| 358 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = (DDPMScheduler,)
    def get_scheduler_config( self : Union[str, Any] , **kwargs : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        config = {
            """num_train_timesteps""": 1_000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }
        config.update(**kwargs )
        return config
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg="""`custom_timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=timesteps )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
            scheduler.set_timesteps(timesteps=timesteps )
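# --- Added illustration (not part of the original tests) ---
# Hedged sketch of the reverse-diffusion loop the full-loop tests above exercise,
# written against the public DDPMScheduler API; `model` and `sample` are placeholders
# supplied by the caller.
def _ddpm_loop_sketch(model , sample ):
    scheduler = DDPMScheduler(num_train_timesteps=1_000 )
    scheduler.set_timesteps(50 )
    generator = torch.manual_seed(0 )
    for t in scheduler.timesteps:
        residual = model(sample , t )
        sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
    return sample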
| 331 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=a ):
"""simple docstring"""
lowerCAmelCase__ = ["flax", "transformers"]
def __init__( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Any ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Any ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Any ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
class lowerCAmelCase__ ( metaclass=a ):
"""simple docstring"""
lowerCAmelCase__ = ["flax", "transformers"]
def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
class lowerCAmelCase__ ( metaclass=a ):
"""simple docstring"""
lowerCAmelCase__ = ["flax", "transformers"]
def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase__ ( cls : Any , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
class lowerCAmelCase__ ( metaclass=a ):
"""simple docstring"""
lowerCAmelCase__ = ["flax", "transformers"]
def __init__( self : Optional[int] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase__ ( cls : Any , *__SCREAMING_SNAKE_CASE : Any , **__SCREAMING_SNAKE_CASE : Tuple ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
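# --- Added note (illustrative) ---
# Each dummy object above exists so that importing the package without flax or
# transformers installed fails lazily: instantiating any of these classes (or calling
# their classmethods, typically `from_config` / `from_pretrained` upstream) raises an
# ImportError via `requires_backends` naming the missing backends ["flax", "transformers"].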
| 359 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar('T')
def get_parent_position( position ):
    """simple docstring"""
    return (position - 1) // 2
def get_child_left_position( position ):
    """simple docstring"""
    return (2 * position) + 1
def get_child_right_position( position ):
    """simple docstring"""
    return (2 * position) + 2
class MinPriorityQueue( Generic[T] ):
    """simple docstring"""
    def __init__( self : List[str] ) -> None:
        """simple docstring"""
        self.heap = []
        self.position_map = {}
        self.elements = 0
    def __len__( self : Optional[Any] ) -> int:
        """simple docstring"""
        return self.elements
    def __repr__( self : List[str] ) -> str:
        """simple docstring"""
        return str(self.heap )
    def is_empty( self : Tuple ) -> bool:
        """simple docstring"""
        return self.elements == 0
    def push( self : int , elem : T , weight : int ) -> None:
        """simple docstring"""
        self.heap.append((elem, weight) )
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem )
    def extract_min( self : Any ) -> T:
        """simple docstring"""
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        elem , _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem , _ = self.heap[0]
            self._bubble_down(bubble_down_elem )
        return elem
    def update_key( self : List[Any] , elem : T , weight : int ) -> None:
        """simple docstring"""
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position )
            _ , parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem )
            else:
                self._bubble_down(elem )
        else:
            self._bubble_down(elem )
    def _bubble_up( self : List[Any] , elem : T ) -> None:
        """simple docstring"""
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos )
        _ , weight = self.heap[curr_pos]
        _ , parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position , curr_pos )
            return self._bubble_up(elem )
        return None
    def _bubble_down( self : Union[str, Any] , elem : T ) -> None:
        """simple docstring"""
        curr_pos = self.position_map[elem]
        _ , weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos )
        child_right_position = get_child_right_position(curr_pos )
        if child_left_position < self.elements and child_right_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        if child_left_position < self.elements:
            _ , child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position , curr_pos )
                return self._bubble_down(elem )
        else:
            return None
        if child_right_position < self.elements:
            _ , child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position , curr_pos )
                return self._bubble_down(elem )
        return None
    def _swap_nodes( self : Any , node1_pos : int , node2_pos : int ) -> None:
        """simple docstring"""
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos] , self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency map of maps."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        """Register the node with an empty adjacency map if it is unseen."""
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge by recording the weight in both directions."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm over the graph, returning the distance map and
    the parent of every node in the resulting spanning tree."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization: the first extracted node becomes the root of the tree
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm: repeatedly attach the cheapest reachable node
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
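# Minimal usage sketch (added for illustration; the tiny triangle graph is
# made up). Starting from "a", the cheapest tree keeps edges ("a", "b") and
# ("a", "c"), which is what the parent map reports.
if __name__ == "__main__":
    demo_graph: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    demo_graph.add_edge("a", "b", 3)
    demo_graph.add_edge("b", "c", 10)
    demo_graph.add_edge("c", "a", 5)
    dist, parent = prims_algo(demo_graph)
    print(dist)    # e.g. {'a': 0, 'b': 3, 'c': 5}
    print(parent)  # e.g. {'a': None, 'b': 'a', 'c': 'a'}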
| 331 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """Builds tiny ViT configs and random inputs for the unit tests."""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Aggregate model and pipeline tests for TF ViT."""

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """Instantiate the shared model and config testers."""
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
def UpperCAmelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def UpperCAmelCase__ ( self : str ) -> Tuple:
"""simple docstring"""
        model = TFViTModel.from_pretrained("""google/vit-base-patch16-224""")
        self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test image used by the integration tests."""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
        """Image processor matching the public checkpoint, if vision deps are available."""
        return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""") if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
        model = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""tf""")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1E-4)
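# Hedged usage sketch (comments only, not part of the test suite): running the
# same public checkpoint outside unittest; the image path is made up.
#
#   from PIL import Image
#   import tensorflow as tf
#   from transformers import TFViTForImageClassification, ViTImageProcessor
#
#   processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="tf")
#   logits = model(**inputs).logits
#   print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])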
| 360 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of a*x**2 + b*x + c = 0, as reals when possible."""
    if a == 0:
        raise ValueError("""Coefficient 'a' must not be zero.""")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    """Print the roots of an example quadratic."""
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(F'The solutions are: {solution_1} and {solution_2}')
if __name__ == "__main__":
main()
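# Added example: a negative discriminant yields complex roots, e.g.
# x**2 + 2x + 5 = 0 has delta = 4 - 20 = -16 and roots -1 ± 2j.
#
# >>> quadratic_roots(1, 2, 5)
# ((-1+2j), (-1-2j))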
| 331 | 0 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class EfficientFormerConfig(PretrainedConfig):
    """Configuration class for the EfficientFormer model."""

    model_type = "efficientformer"
    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1E-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        image_size: int = 224,
        batch_norm_eps: float = 1E-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
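# Hedged usage sketch (assumes the class is exported under its transformers
# name, EfficientFormerConfig):
#
# >>> config = EfficientFormerConfig(depths=[3, 2, 6, 4])
# >>> config.hidden_sizes
# [48, 96, 224, 448]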
| 361 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCAmelCase : Optional[int] = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig(PretrainedConfig):
    """Configuration class for the RetriBERT retrieval model."""

    model_type = "retribert"
    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
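# Hedged sketch (assuming the transformers export name RetriBertConfig): the
# two knobs specific to this model are whether the query and document encoders
# share weights and the size of the final projection.
#
# >>> config = RetriBertConfig(share_encoders=False, projection_dim=256)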
| 331 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    """Builds tiny DeBERTa configs and random inputs for the unit tests."""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
        """Return a config with a reduced vocab size (used by pipeline tests)."""
        config = self.get_config()
        config.vocab_size = 300
        return config
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Aggregate model and pipeline tests for DeBERTa."""
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        """Instantiate the shared model and config testers."""
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ) -> int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
def UpperCAmelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
@slow
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0]
# compare the actual values for a slice.
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
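# Hedged usage sketch mirroring the integration test above (tokenizer choice
# is an assumption; the checkpoint is the same public one):
#
#   import torch
#   from transformers import AutoTokenizer, DebertaModel
#
#   tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
#   model = DebertaModel.from_pretrained("microsoft/deberta-base")
#   with torch.no_grad():
#       hidden = model(**tokenizer("Hello world", return_tensors="pt")).last_hidden_state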
| 362 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( a , a , a , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        """Build the tiny model components used by the fast tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5_002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""")
        tokenizer.model_max_length = 77
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """Deterministic generator and prompt inputs for the fast tests."""
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
def UpperCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = text_encoder
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A photo of an astronaut"""
__SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , )
# TODO: remove after fixing the non-deterministic text encoder
__SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = text_encoder
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        """Free GPU memory between the slow tests."""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
__SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""numpy""" )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__SCREAMING_SNAKE_CASE = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
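# Hedged usage sketch of the pipeline exercised above (same public checkpoint
# as the slow tests; the output path is made up):
#
#   import torch
#   from diffusers import AltDiffusionPipeline
#
#   pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
#   image = pipe(
#       "A painting of a squirrel eating a burger",
#       generator=torch.manual_seed(0),
#       num_inference_steps=20,
#   ).images[0]
#   image.save("squirrel.png")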
| 331 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    """Build the Swin2SRConfig that matches a given original checkpoint URL."""
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = """pixelshuffle_aux"""
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = """pixelshuffledirect"""
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = """nearest+conv"""
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = """"""
    return config
def rename_key(name, config):
    """Translate one original Swin2SR parameter name into the HF naming scheme."""
if "patch_embed.proj" in name and "layers" not in name:
__SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
__SCREAMING_SNAKE_CASE = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
__SCREAMING_SNAKE_CASE = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
__SCREAMING_SNAKE_CASE = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
__SCREAMING_SNAKE_CASE = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
__SCREAMING_SNAKE_CASE = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
__SCREAMING_SNAKE_CASE = """layernorm.weight"""
if name == "norm.bias":
__SCREAMING_SNAKE_CASE = """layernorm.bias"""
if "conv_first" in name:
__SCREAMING_SNAKE_CASE = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__SCREAMING_SNAKE_CASE = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__SCREAMING_SNAKE_CASE = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
__SCREAMING_SNAKE_CASE = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
__SCREAMING_SNAKE_CASE = name.replace("""upsample.2""" , """upsample.convolution_1""" )
__SCREAMING_SNAKE_CASE = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
__SCREAMING_SNAKE_CASE = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
__SCREAMING_SNAKE_CASE = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
__SCREAMING_SNAKE_CASE = """swin2sr.""" + name
return name
def convert_state_dict(orig_state_dict, config):
    """Rename all checkpoint keys and split the fused qkv projections."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # fused qkv tensors are split into separate query/key/value tensors
            key_split = key.split(""".""")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[
                    f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias'
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val
    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Swin2SR checkpoint, verify its outputs, and optionally save or push it."""
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if len(missing_keys) > 0:
        raise ValueError("""Missing keys when converting: {}""".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'Unexpected key {key} in state_dict')
    # verify values
    url = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
    image = Image.open(requests.get(url, stream=True).raw).convert("""RGB""")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if """Jpeg""" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    pixel_values = transforms(image).unsqueeze(0)
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)
    outputs = model(pixel_values)
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__SCREAMING_SNAKE_CASE = torch.Size([1, 3, 5_12, 5_12] )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__SCREAMING_SNAKE_CASE = torch.Size([1, 3, 10_24, 10_24] )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__SCREAMING_SNAKE_CASE = torch.Size([1, 3, 10_24, 10_24] )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__SCREAMING_SNAKE_CASE = torch.Size([1, 3, 5_12, 5_12] )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__SCREAMING_SNAKE_CASE = torch.Size([1, 3, 10_24, 10_24] )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1E-3)
print("""Looks ok!""" )
    url_to_name = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
    model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path)
        print(F'Saving image processor to {pytorch_dump_folder_path}')
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
UpperCAmelCase : List[str] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
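# Example invocation (illustrative; the script file name and output path are
# assumptions, the checkpoint URL is the default from the argparser above):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64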
| 363 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
    'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/diffusers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the regex registered for `pattern`."""
    with open(fname, """r""", encoding="""utf-8""", newline="""\n""") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""", version)
    code = re_pattern.sub(replace, code)
    with open(fname, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the pinned version in every actively maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""")
        if "legacy" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                update_version_in_file(os.path.join(folder, fname), version, pattern="""examples""")
def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded in the repo."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point model-list links in the README at the stable docs instead of main."""
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/diffusers/main/model_doc""", """https://huggingface.co/docs/diffusers/model_doc""", )
        index += 1
    with open(README_FILE, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["""init"""], """r""") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Compute the release version, confirm it with the user, and apply it."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = F'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(F'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version
    print(F'Updating version to {version}.')
    global_version_update(version, patch=patch)
def post_release_work():
    """Compute the next dev version, confirm it with the user, and apply it."""
    current_version = get_version()
    dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version
    print(F'Updating version to {version}.')
    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCAmelCase : Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
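# Added illustration (standalone sketch) of the substitution performed by
# update_version_in_file for the "init" pattern:
#
# >>> import re
# >>> re.sub(r'^__version__\s+=\s+"[^"]+"\s*$', '__version__ = "0.20.0"',
# ...        '__version__ = "0.19.0.dev0"', flags=re.MULTILINE)
# '__version__ = "0.20.0"'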
| 331 | 0 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a__ ( a__ , a__ ):
"""simple docstring"""
assert isinstance(a__ , a__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__SCREAMING_SNAKE_CASE = TextDatasetReader(a__ , cache_dir=a__ , keep_in_memory=a__ ).read()
_check_text_dataset(a__ , a__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
__SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
__SCREAMING_SNAKE_CASE = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__SCREAMING_SNAKE_CASE = TextDatasetReader(a__ , features=a__ , cache_dir=a__ ).read()
_check_text_dataset(a__ , a__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
__SCREAMING_SNAKE_CASE = TextDatasetReader(a__ , cache_dir=a__ , split=a__ ).read()
_check_text_dataset(a__ , a__ )
assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("""path_type""" , [str, list] )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
if issubclass(a__ , a__ ):
__SCREAMING_SNAKE_CASE = text_path
elif issubclass(a__ , a__ ):
__SCREAMING_SNAKE_CASE = [text_path]
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
__SCREAMING_SNAKE_CASE = TextDatasetReader(a__ , cache_dir=a__ ).read()
_check_text_dataset(a__ , a__ )
def a__ ( a__ , a__ , a__=("train",) ):
"""simple docstring"""
assert isinstance(a__ , a__ )
for split in splits:
__SCREAMING_SNAKE_CASE = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__SCREAMING_SNAKE_CASE = TextDatasetReader({"""train""": text_path} , cache_dir=a__ , keep_in_memory=a__ ).read()
_check_text_datasetdict(a__ , a__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
# Text files are always read into a single "text" column; its default dtype is string
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
__SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
__SCREAMING_SNAKE_CASE = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__SCREAMING_SNAKE_CASE = TextDatasetReader({"""train""": text_path} , features=a__ , cache_dir=a__ ).read()
_check_text_datasetdict(a__ , a__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
if split:
__SCREAMING_SNAKE_CASE = {split: text_path}
else:
__SCREAMING_SNAKE_CASE = """train"""
__SCREAMING_SNAKE_CASE = {"""train""": text_path, """test""": text_path}
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
__SCREAMING_SNAKE_CASE = TextDatasetReader(a__ , cache_dir=a__ ).read()
_check_text_datasetdict(a__ , a__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 364 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[str]=8 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=99 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Optional[int]=5 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=36 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Any=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : int=None , ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = 300
return config
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
(
__SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE ,
) = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = MraModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForMaskedLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = MraForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = MraForTokenClassification(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = MraForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
__SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE ,
__SCREAMING_SNAKE_CASE ,
) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = ()
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = MraModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""MRA does not output attentions""" )
def UpperCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
return
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Dict ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
__SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
__SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = 50_265
__SCREAMING_SNAKE_CASE = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
__SCREAMING_SNAKE_CASE = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = 50_265
__SCREAMING_SNAKE_CASE = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 331 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=a__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=a__ , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=a__ )
return parser.parse_args()
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parse_args()
# Import training_script as a module.
__SCREAMING_SNAKE_CASE = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__SCREAMING_SNAKE_CASE = script_fpath.stem
__SCREAMING_SNAKE_CASE = importlib.import_module(a__ )
# Patch sys.argv
__SCREAMING_SNAKE_CASE = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
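# Illustrative invocation (script name and flags are hypothetical):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
# After the sys.argv patch above, the imported training script sees:
#   ['run_glue.py', '--model_name_or_path', 'bert-base-cased', '--tpu_num_cores', '8']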
if __name__ == "__main__":
main()
| 365 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
UpperCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
lowerCAmelCase__ = 10000
lowerCAmelCase__ = None
lowerCAmelCase__ = None
class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCAmelCase__ = ParquetConfig
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
__SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ):
__SCREAMING_SNAKE_CASE = data_files
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
__SCREAMING_SNAKE_CASE = []
for split_name, files in data_files.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE = [dl_manager.iter_files(file ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ):
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(__SCREAMING_SNAKE_CASE ) )
break
splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={"""files""": files} ) )
return splits
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__SCREAMING_SNAKE_CASE = table_cast(__SCREAMING_SNAKE_CASE , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ):
with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = pq.ParquetFile(__SCREAMING_SNAKE_CASE )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__SCREAMING_SNAKE_CASE = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'{file_idx}_{batch_idx}', self._cast_table(__SCREAMING_SNAKE_CASE )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}' )
raise
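# Minimal usage sketch (the file path is illustrative): this builder backs
#   load_dataset("parquet", data_files={"train": "data/train.parquet"})
# and yields one Arrow table per Parquet record batch read above.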
| 331 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # "J'aime le camembert !" ("I love camembert!")
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )["""last_hidden_state"""]
__SCREAMING_SNAKE_CASE = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 366 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
UpperCAmelCase : Any = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
UpperCAmelCase : Optional[Any] = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
UpperCAmelCase : Optional[int] = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase : List[str] = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase : List[Any] = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def a__ ( a__ , a__ ):
"""simple docstring"""
for tf_name, hf_name in patterns:
__SCREAMING_SNAKE_CASE = k.replace(a__ , a__ )
return k
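# Illustrative trace of the replacements above (the TF key is hypothetical):
#   'pegasus/decoder/layer_0/attention/self/query/kernel'
# becomes, after applying DECODER_PATTERNS in order,
#   'model.decoder.layers.0.self_attn.q_proj.weight'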
def a__ ( a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = BigBirdPegasusConfig(**a__ )
__SCREAMING_SNAKE_CASE = BigBirdPegasusForConditionalGeneration(a__ )
__SCREAMING_SNAKE_CASE = torch_model.state_dict()
__SCREAMING_SNAKE_CASE = {}
# separating decoder weights
__SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
__SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ):
__SCREAMING_SNAKE_CASE = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(a__ ):
continue
__SCREAMING_SNAKE_CASE = DECODER_PATTERNS
__SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ )
if new_k not in state_dict:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(i in k for i in ["""dense""", """query""", """key""", """value"""] ):
__SCREAMING_SNAKE_CASE = v.T
__SCREAMING_SNAKE_CASE = torch.from_numpy(a__ )
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ):
__SCREAMING_SNAKE_CASE = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(a__ ):
continue
__SCREAMING_SNAKE_CASE = REMAINING_PATTERNS
__SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(i in k for i in ["""dense""", """query""", """key""", """value"""] ):
__SCREAMING_SNAKE_CASE = v.T
__SCREAMING_SNAKE_CASE = torch.from_numpy(a__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
__SCREAMING_SNAKE_CASE = mapping["""model.embed_positions.weight"""]
__SCREAMING_SNAKE_CASE = mapping.pop("""model.embed_positions.weight""" )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch_model.load_state_dict(a__ , strict=a__ )
__SCREAMING_SNAKE_CASE = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tf.train.list_variables(a__ )
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = ["""global_step"""]
for name, shape in tqdm(a__ , desc="""converting tf checkpoint to dict""" ):
__SCREAMING_SNAKE_CASE = any(pat in name for pat in ignore_name )
if skip_key:
continue
__SCREAMING_SNAKE_CASE = tf.train.load_variable(a__ , a__ )
__SCREAMING_SNAKE_CASE = array
return tf_weights
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_tf_weights_as_numpy(a__ )
__SCREAMING_SNAKE_CASE = convert_bigbird_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
if __name__ == "__main__":
UpperCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase : int = parser.parse_args()
UpperCAmelCase : Dict = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 331 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def a__ ( a__ ):
"""simple docstring"""
return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
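# Example (flags are illustrative): ['--num_proc', '8', '--output_dir', 'out']
# is parsed into {'num_proc': '8', 'output_dir': 'out'}; values stay strings.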
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ArgumentParser(
"""HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=a__ )
__SCREAMING_SNAKE_CASE = parser.add_subparsers(help="""datasets-cli command helpers""" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(a__ )
EnvironmentCommand.register_subcommand(a__ )
TestCommand.register_subcommand(a__ )
RunBeamCommand.register_subcommand(a__ )
DummyDataCommand.register_subcommand(a__ )
# Parse args
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_known_args()
if not hasattr(a__ , """func""" ):
parser.print_help()
exit(1 )
__SCREAMING_SNAKE_CASE = parse_unknown_args(a__ )
# Run
__SCREAMING_SNAKE_CASE = args.func(a__ , **a__ )
service.run()
if __name__ == "__main__":
main()
| 367 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(a )
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Any=None ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = {}
if prompt is not None:
__SCREAMING_SNAKE_CASE = prompt
if generate_kwargs is not None:
__SCREAMING_SNAKE_CASE = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__SCREAMING_SNAKE_CASE = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
__SCREAMING_SNAKE_CASE = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE )
if prompt is not None:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise ValueError(
f'Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE )} - but expected a single string. '
"""Note also that one single text can be provided for conditional image to text generation.""" )
__SCREAMING_SNAKE_CASE = self.model.config.model_type
if model_type == "git":
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids
__SCREAMING_SNAKE_CASE = [self.tokenizer.cls_token_id] + input_ids
__SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(__SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'Model type {model_type} does not support conditional text generation' )
else:
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__SCREAMING_SNAKE_CASE = None
return model_inputs
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> List[str]:
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , __SCREAMING_SNAKE_CASE )
and all(x is None for x in model_inputs["""input_ids"""] )
):
__SCREAMING_SNAKE_CASE = None
if generate_kwargs is None:
__SCREAMING_SNAKE_CASE = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__SCREAMING_SNAKE_CASE = model_inputs.pop(self.model.main_input_name )
__SCREAMING_SNAKE_CASE = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return model_outputs
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
for output_ids in model_outputs:
__SCREAMING_SNAKE_CASE = {
"""generated_text""": self.tokenizer.decode(
__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , )
}
records.append(__SCREAMING_SNAKE_CASE )
return records
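# Minimal usage sketch (model name and image path are illustrative):
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("cats.png")  # -> [{'generated_text': 'two cats laying on a couch'}]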
| 331 | 0 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 9, 14 # noqa: F841
__SCREAMING_SNAKE_CASE = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__SCREAMING_SNAKE_CASE = defaultdict(a__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
__SCREAMING_SNAKE_CASE = mst(a__ )
__SCREAMING_SNAKE_CASE = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
__SCREAMING_SNAKE_CASE = tuple(answer[:2] )
__SCREAMING_SNAKE_CASE = tuple(edge[::-1] )
assert edge in result or reverse in result
| 368 |
'''simple docstring'''
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = len(a__ )
while cur > 1:
# Find the maximum number in arr
__SCREAMING_SNAKE_CASE = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__SCREAMING_SNAKE_CASE = arr[mi::-1] + arr[mi + 1 : len(a__ )]
# Reverse whole list
__SCREAMING_SNAKE_CASE = arr[cur - 1 :: -1] + arr[cur : len(a__ )]
cur -= 1
return arr
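# Worked trace (input is illustrative): pancake_sort([3, 1, 2])
#   cur=3: max 3 is at index 0 -> flip arr[:1] (no-op), flip arr[:3] -> [2, 1, 3]
#   cur=2: max 2 is at index 0 -> flip arr[:1] (no-op), flip arr[:2] -> [1, 2, 3]
#   cur=1: loop ends -> [1, 2, 3]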
if __name__ == "__main__":
UpperCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase : str = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 331 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase__ ( a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = VideoToVideoSDPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {"latents"}
lowerCAmelCase__ = False
# No `output_type`.
lowerCAmelCase__ = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
__SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=0 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """np"""
__SCREAMING_SNAKE_CASE = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__SCREAMING_SNAKE_CASE = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=5E-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
__SCREAMING_SNAKE_CASE = torch.randn((1, 10, 3, 1_024, 576) , generator=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = video.to("""cuda""" )
__SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
__SCREAMING_SNAKE_CASE = pipe(__SCREAMING_SNAKE_CASE , video=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=3 , output_type="""pt""" ).frames
__SCREAMING_SNAKE_CASE = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 369 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase : int = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.path.dirname(os.path.realpath(a__ ) )
__SCREAMING_SNAKE_CASE = os.path.join(a__ , """words.txt""" )
__SCREAMING_SNAKE_CASE = """"""
with open(a__ ) as f:
__SCREAMING_SNAKE_CASE = f.readline()
__SCREAMING_SNAKE_CASE = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
__SCREAMING_SNAKE_CASE = [
word
for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(a__ )
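# Worked example (the word is illustrative): "SKY" -> 19 + 11 + 25 = 55, and 55 is
# the 10th triangular number (10 * 11 / 2), so "SKY" counts as a triangle word.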
if __name__ == "__main__":
print(solution())
| 331 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def a__ ( a__ ):
__SCREAMING_SNAKE_CASE = """huggingface/label-files"""
__SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(a__ , a__ , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE = {int(k ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
__SCREAMING_SNAKE_CASE = BitConfig(
conv_layer=a__ , num_labels=10_00 , idalabel=a__ , labelaid=a__ , )
return config
def a__ ( a__ ):
if "stem.conv" in name:
__SCREAMING_SNAKE_CASE = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
__SCREAMING_SNAKE_CASE = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
__SCREAMING_SNAKE_CASE = """bit.""" + name
if "bit" not in name and "classifier" not in name:
__SCREAMING_SNAKE_CASE = """bit.encoder.""" + name
return name
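# Illustrative renames produced by the rules above (keys are hypothetical):
#   'stem.conv.weight' -> 'bit.embedder.convolution.weight'
#   'head.fc.weight'   -> 'classifier.1.weight'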
def a__ ( ):
__SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__SCREAMING_SNAKE_CASE = Image.open(requests.get(a__ , stream=a__ ).raw )
return im
@torch.no_grad()
def a__ ( a__ , a__ , a__=False ):
__SCREAMING_SNAKE_CASE = get_config(a__ )
# load original model from timm
__SCREAMING_SNAKE_CASE = create_model(a__ , pretrained=a__ )
timm_model.eval()
# load state_dict of original model
__SCREAMING_SNAKE_CASE = timm_model.state_dict()
for key in state_dict.copy().keys():
__SCREAMING_SNAKE_CASE = state_dict.pop(a__ )
__SCREAMING_SNAKE_CASE = val.squeeze() if """head""" in key else val
# load HuggingFace model
__SCREAMING_SNAKE_CASE = BitForImageClassification(a__ )
model.eval()
model.load_state_dict(a__ )
# create image processor
__SCREAMING_SNAKE_CASE = create_transform(**resolve_data_config({} , model=a__ ) )
__SCREAMING_SNAKE_CASE = transform.transforms
__SCREAMING_SNAKE_CASE = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
__SCREAMING_SNAKE_CASE = BitImageProcessor(
do_resize=a__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=a__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = transform(a__ ).unsqueeze(0 )
__SCREAMING_SNAKE_CASE = processor(a__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(a__ , a__ )
# verify logits
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(a__ )
__SCREAMING_SNAKE_CASE = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
__SCREAMING_SNAKE_CASE = timm_model(a__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(a__ ).mkdir(exist_ok=a__ )
print(F'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(a__ )
processor.save_pretrained(a__ )
if push_to_hub:
print(F'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(F'ybelkada/{model_name}' )
processor.push_to_hub(F'ybelkada/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
UpperCAmelCase : Any = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 370 |
'''simple docstring'''
class lowerCAmelCase__ : # Public class to implement a graph
"""simple docstring"""
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = row
__SCREAMING_SNAKE_CASE = col
__SCREAMING_SNAKE_CASE = graph
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> bool:
"""simple docstring"""
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__SCREAMING_SNAKE_CASE = [-1, 0, 1, -1, 1, -1, 0, 1]
__SCREAMING_SNAKE_CASE = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __SCREAMING_SNAKE_CASE ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> int: # And finally, count all islands.
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [[False for j in range(self.COL )] for i in range(self.ROW )]
__SCREAMING_SNAKE_CASE = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
count += 1
return count
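# Usage sketch (the grid is illustrative; assumes the descriptive name count_islands
# for the final counting method above):
#   graph = [[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]
#   lowerCAmelCase__(3, 4, graph).count_islands()  # -> 2 (8-directional connectivity)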
| 331 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : List[str] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "megatron-bert"
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Dict=29_056 , __SCREAMING_SNAKE_CASE : Tuple=1_024 , __SCREAMING_SNAKE_CASE : int=24 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : str=4_096 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=512 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=1E-12 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : str="absolute" , __SCREAMING_SNAKE_CASE : int=True , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
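# Usage sketch (a minimal example; the MegatronBertModel import is assumed):
#   from transformers import MegatronBertModel
#   config = lowerCAmelCase__()  # defaults mirror the 345M megatron-bert setup
#   model = MegatronBertModel(config)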
| 371 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=99 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : Dict=5 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Tuple=37 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=4 , ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_attention_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_choices
def UpperCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
__SCREAMING_SNAKE_CASE = jnp.array([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
__SCREAMING_SNAKE_CASE = 50_000
__SCREAMING_SNAKE_CASE = (1, 6, vocab_size)
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 331 | 0 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_ = 1_6
lowerCAmelCase_ = 3_2
def __magic_name__ ( A , A = 1_6 , A = "bert-base-cased" ) -> Optional[int]:
snake_case = AutoTokenizer.from_pretrained(A )
snake_case = load_dataset('glue' , 'mrpc' )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
snake_case = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case = datasets.map(
A , batched=A , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return tokenizer.pad(A , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
snake_case = DataLoader(
tokenized_datasets['train'] , shuffle=A , collate_fn=A , batch_size=A )
snake_case = DataLoader(
tokenized_datasets['validation'] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def __magic_name__ ( A , A , A , A ) -> str:
model.eval()
snake_case = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case = model(**A )
snake_case = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case , snake_case = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
snake_case = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
snake_case = metric.compute()
return eval_metric["accuracy"]
def __magic_name__ ( A , A ) -> int:
# Initialize accelerator
snake_case = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case = config['lr']
snake_case = int(config['num_epochs'] )
snake_case = int(config['seed'] )
snake_case = int(config['batch_size'] )
snake_case = args.model_name_or_path
set_seed(A )
snake_case , snake_case = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
snake_case = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
snake_case = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
snake_case = 1
snake_case = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
snake_case = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case , snake_case , snake_case , snake_case , snake_case = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
snake_case = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case = 0
snake_case = evaluate.load('glue' , 'mrpc' )
snake_case = num_epochs
if args.partial_train_epoch is not None:
snake_case = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
snake_case = args.resume_from_checkpoint.split('epoch_' )[1]
snake_case = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
snake_case = int(A ) + 1
snake_case = evaluation_loop(A , A , A , A )
accelerator.print('resumed checkpoint performance:' , A )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
        accelerator.print('resumed optimizer\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , 'r' ) as f:
snake_case = json.load(A )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
snake_case = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
snake_case = model(**A )
snake_case = outputs.loss
snake_case = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
snake_case = F'''epoch_{epoch}'''
snake_case = os.path.join(args.output_dir , A )
accelerator.save_state(A )
snake_case = evaluation_loop(A , A , A , A )
snake_case = accuracy
snake_case = lr_scheduler.get_lr()[0]
snake_case = optimizer.param_groups[0]['lr']
snake_case = epoch
snake_case = overall_step
accelerator.print(F'''epoch {epoch}:''' , A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , 'w' ) as f:
json.dump(A , A )
def __magic_name__ ( ) -> Any:
    snake_case = argparse.ArgumentParser(description='Simple example of a training script with checkpointing.' )
parser.add_argument(
'--model_name_or_path' , type=A , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=A , )
parser.add_argument(
'--output_dir' , type=A , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=A , default=A , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=A , default=A , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=A , default=2 , help='Number of train epochs.' , )
snake_case = parser.parse_args()
snake_case = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
training_function(A , A )
if __name__ == "__main__":
main()
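# Example invocation (a sketch: the DeepSpeed config file name is an assumption;
# the CLI flags all exist in the argument parser above):
#
#     accelerate launch --config_file deepspeed_config.yaml this_script.py \
#         --model_name_or_path bert-base-cased --output_dir ./checkpoints --num_epochs 2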
| 332 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A , A , A ) -> int | float:
if len(A ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(A )
or left < -len(A )
or right >= len(A )
or right < -len(A )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
snake_case = (left + right) >> 1 # the middle
snake_case = find_max(A , A , A ) # find max in range[left, mid]
snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
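# Worked example: find_max([1, 9, 3], 0, 2) splits at mid = 1, compares
# max(1, 9) from the left half with 3 from the right half, and returns 9.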
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 332 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class lowerCamelCase :
snake_case_ = 42
snake_case_ = None
snake_case_ = None
def __magic_name__ ( ) -> Node | None:
snake_case = Node(1 )
snake_case = Node(2 )
snake_case = Node(3 )
snake_case = Node(4 )
snake_case = Node(5 )
return tree
def __magic_name__ ( A ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def __magic_name__ ( A ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def __magic_name__ ( A ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def __magic_name__ ( A ) -> int:
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def __magic_name__ ( A ) -> Sequence[Node | None]:
snake_case = []
if root is None:
return output
snake_case = deque([root] )
while process_queue:
snake_case = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def __magic_name__ ( A , A ) -> Sequence[Node | None]:
snake_case = []
def populate_output(A , A ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(A , A )
return output
def __magic_name__ ( A , A ) -> Sequence[Node | None]:
snake_case = []
def populate_output(A , A ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(A , A )
return output
def __magic_name__ ( A ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
snake_case = []
snake_case = 0
snake_case = height(A )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(A , A ) )
snake_case = 1
else:
output.append(get_nodes_from_right_to_left(A , A ) )
snake_case = 0
return output
def __magic_name__ ( ) -> None: # Main function for testing.
snake_case = make_tree()
print(F'''In-order Traversal: {inorder(A )}''' )
print(F'''Pre-order Traversal: {preorder(A )}''' )
print(F'''Post-order Traversal: {postorder(A )}''' , '\n' )
print(F'''Height of Tree: {height(A )}''' , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(A ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(A ) + 1 ):
print(F'''Level {level}:''' , get_nodes_from_left_to_right(A , level=A ) )
print('\nZigZag order Traversal: ' )
print(zigzag(A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 332 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
@register_to_config
def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str:
super().__init__()
# pass init params to Encoder
snake_case = Encoder(
in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, )
snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels
        snake_case = nn.Conv2d(lowercase_, lowercase_, 1 )
snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ )
        snake_case = nn.Conv2d(lowercase_, lowercase_, 1 )
# pass init params to Decoder
snake_case = Decoder(
in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput:
snake_case = self.encoder(lowercase_ )
snake_case = self.quant_conv(lowercase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase_ )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
snake_case , snake_case , snake_case = self.quantize(lowercase_ )
else:
snake_case = h
snake_case = self.post_quant_conv(lowercase_ )
snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case = sample
snake_case = self.encode(lowercase_ ).latents
snake_case = self.decode(lowercase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
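# Data-flow sketch of the VQ autoencoder above (comment-only):
#
#     encode:  x -> Encoder -> quant_conv -> latents h
#     decode:  h -> VectorQuantizer -> post_quant_conv -> Decoder -> sample
#
# `forward` chains the two, so the reconstruction keeps the input's spatial shape.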
| 332 | 1 |
'''simple docstring'''
def __magic_name__ ( A = 1_0 , A = 2_2 ) -> int:
snake_case = range(1 , A )
snake_case = range(1 , A )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
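# For the default bounds this counts the n-digit integers that are also nth
# powers (Project Euler problem 63); the expected answer is 49.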
if __name__ == "__main__":
print(f"{solution(1_0, 2_2) = }")
| 332 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def __magic_name__ ( A = 2_0_0_0_0_0_0 ) -> int:
snake_case = [0]
snake_case = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
snake_case = 0
# the area corresponding to the grid that gives the product closest to target
snake_case = 0
# an estimate of b, using the quadratic formula
snake_case = 42
# the largest integer less than b_estimate
snake_case = 42
# the largest integer less than b_estimate
snake_case = 42
# the triangle number corresponding to b_floor
snake_case = 42
# the triangle number corresponding to b_ceil
snake_case = 42
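    # An a x b grid contains T(a) * T(b) rectangles, where T(n) = n * (n + 1) / 2
    # is the nth triangle number. Setting T(a) * T(b) = target and solving the
    # resulting quadratic for b gives b = (-1 + sqrt(1 + 8 * target / T(a))) / 2,
    # which is the estimate computed on each loop iteration below.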
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
snake_case = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
snake_case = floor(A )
snake_case = ceil(A )
snake_case = triangle_numbers[b_floor]
snake_case = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_first_guess * triangle_a
snake_case = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
snake_case = triangle_b_second_guess * triangle_a
snake_case = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"{solution() = }")
| 332 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = (DDPMScheduler,)
def _lowerCamelCase ( self, **lowercase_ ) -> List[str]:
snake_case = {
'num_train_timesteps': 1000,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**lowercase_ )
return config
def _lowerCamelCase ( self ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase_, beta_end=lowercase_ )
def _lowerCamelCase ( self ) -> str:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def _lowerCamelCase ( self ) -> str:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowercase_ )
def _lowerCamelCase ( self ) -> int:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
self.check_over_configs(thresholding=lowercase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase_, prediction_type=lowercase_, sample_max_value=lowercase_, )
def _lowerCamelCase ( self ) -> List[Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def _lowerCamelCase ( self ) -> Tuple:
for t in [0, 500, 999]:
self.check_over_forward(time_step=lowercase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowercase_ )
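        # For the "fixed_small" variance type, the posterior variance is
        # beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t): it is 0 at t = 0
        # and approaches beta_end = 0.02 as t nears num_train_timesteps.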
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def _lowerCamelCase ( self ) -> int:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowercase_ )
snake_case = len(lowercase_ )
snake_case = self.dummy_model()
snake_case = self.dummy_sample_deter
snake_case = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
snake_case = model(lowercase_, lowercase_ )
# 2. predict previous mean of sample x_t-1
snake_case = scheduler.step(lowercase_, lowercase_, lowercase_, generator=lowercase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
snake_case = pred_prev_sample
snake_case = torch.sum(torch.abs(lowercase_ ) )
snake_case = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config(prediction_type='v_prediction' )
snake_case = scheduler_class(**lowercase_ )
snake_case = len(lowercase_ )
snake_case = self.dummy_model()
snake_case = self.dummy_sample_deter
snake_case = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
snake_case = model(lowercase_, lowercase_ )
# 2. predict previous mean of sample x_t-1
snake_case = scheduler.step(lowercase_, lowercase_, lowercase_, generator=lowercase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
snake_case = pred_prev_sample
snake_case = torch.sum(torch.abs(lowercase_ ) )
snake_case = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowercase_ )
snake_case = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowercase_ )
snake_case = scheduler.timesteps
for i, timestep in enumerate(lowercase_ ):
if i == len(lowercase_ ) - 1:
snake_case = -1
else:
snake_case = timesteps[i + 1]
snake_case = scheduler.previous_timestep(lowercase_ )
snake_case = prev_t.item()
self.assertEqual(lowercase_, lowercase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowercase_ )
snake_case = [100, 87, 50, 51, 0]
with self.assertRaises(lowercase_, msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=lowercase_ )
def _lowerCamelCase ( self ) -> Dict:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowercase_ )
snake_case = [100, 87, 50, 1, 0]
snake_case = len(lowercase_ )
with self.assertRaises(lowercase_, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=lowercase_, timesteps=lowercase_ )
def _lowerCamelCase ( self ) -> Any:
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowercase_ )
snake_case = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowercase_, msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}', ):
scheduler.set_timesteps(timesteps=lowercase_ )
| 332 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
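# With the lazy module in place, importing the package is cheap: submodules are
# only materialized on first attribute access. A usage sketch (the two configs
# passed in are assumed to exist):
#
#     from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderConfig
#     config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)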
| 332 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
snake_case_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
snake_case_ = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
snake_case_ = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
snake_case_ = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''train'''
snake_case_ = '''dev'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
snake_case = args
snake_case = is_language_sensitive
        snake_case = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowercase_, lowercase_ ):
try:
snake_case = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
snake_case = mode
# Load data features from cache or dataset file
snake_case = 'v2' if args.version_2_with_negative else 'v1'
snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
snake_case = time.time()
snake_case = torch.load(lowercase_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
snake_case = self.old_features['features']
snake_case = self.old_features.get('dataset', lowercase_ )
snake_case = self.old_features.get('examples', lowercase_ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
if mode == Split.dev:
snake_case = self.processor.get_dev_examples(args.data_dir )
else:
snake_case = self.processor.get_train_examples(args.data_dir )
snake_case , snake_case = squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, )
snake_case = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
snake_case = self.features[i]
snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )
snake_case = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
            inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case = torch.tensor(feature.start_position, dtype=torch.long )
snake_case = torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
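# A minimal usage sketch (comment-only; `SquadDataset` is a hypothetical handle
# for the dataset class defined above, and `args` / `tokenizer` are assumed to
# be a populated argument dataclass and a pretrained tokenizer):
#
#     dataset = SquadDataset(args, tokenizer, mode="train")
#     encoded = dataset[0]   # dict with input_ids / attention_mask / token_type_ids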
| 332 | 1 |
'''simple docstring'''
import os
import sys
import unittest
lowerCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase_ = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
lowerCAmelCase_ = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = get_test_to_tester_mapping(lowercase_ )
snake_case = get_test_to_tester_mapping(lowercase_ )
snake_case = {'BertModelTest': 'BertModelTester'}
snake_case = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(lowercase_ ), lowercase_ )
self.assertEqual(get_test_info.to_json(lowercase_ ), lowercase_ )
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = get_model_to_test_mapping(lowercase_ )
snake_case = get_model_to_test_mapping(lowercase_ )
snake_case = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
snake_case = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(lowercase_ ), lowercase_ )
self.assertEqual(get_test_info.to_json(lowercase_ ), lowercase_ )
def _lowerCamelCase ( self ) -> str:
snake_case = get_model_to_tester_mapping(lowercase_ )
snake_case = get_model_to_tester_mapping(lowercase_ )
snake_case = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
snake_case = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(lowercase_ ), lowercase_ )
self.assertEqual(get_test_info.to_json(lowercase_ ), lowercase_ )
| 332 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( A , A , A ) -> Any:
# Initialise PyTorch model
snake_case = BertConfig.from_json_file(A )
print(F'''Building PyTorch model from configuration: {config}''' )
snake_case = BertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_bert(A , A , A )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
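# Example invocation (the script file name and all paths are placeholders; the
# flags match the argument parser above):
#
#     python convert_bert_tf_checkpoint.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin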
| 332 | 1 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''Wav2Vec2FeatureExtractor'''
snake_case_ = '''AutoTokenizer'''
def __init__( self, lowercase_, lowercase_ ) -> str:
super().__init__(lowercase_, lowercase_ )
snake_case = self.feature_extractor
snake_case = False
@classmethod
def _lowerCamelCase ( cls, lowercase_, **lowercase_ ) -> Optional[Any]:
try:
return super().from_pretrained(lowercase_, **lowercase_ )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ', FutureWarning, )
        snake_case = Wav2Vec2FeatureExtractor.from_pretrained(lowercase_, **lowercase_ )
        snake_case = Wav2Vec2CTCTokenizer.from_pretrained(lowercase_, **lowercase_ )
return cls(feature_extractor=lowercase_, tokenizer=lowercase_ )
def __call__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowercase_, **lowercase_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
snake_case = kwargs.pop('raw_speech' )
else:
snake_case = kwargs.pop('audio', lowercase_ )
snake_case = kwargs.pop('sampling_rate', lowercase_ )
snake_case = kwargs.pop('text', lowercase_ )
if len(lowercase_ ) > 0:
snake_case = args[0]
snake_case = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
snake_case = self.feature_extractor(lowercase_, *lowercase_, sampling_rate=lowercase_, **lowercase_ )
if text is not None:
snake_case = self.tokenizer(lowercase_, **lowercase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case = encodings['input_ids']
return inputs
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*lowercase_, **lowercase_ )
snake_case = kwargs.pop('input_features', lowercase_ )
snake_case = kwargs.pop('labels', lowercase_ )
if len(lowercase_ ) > 0:
snake_case = args[0]
snake_case = args[1:]
if input_features is not None:
snake_case = self.feature_extractor.pad(lowercase_, *lowercase_, **lowercase_ )
if labels is not None:
snake_case = self.tokenizer.pad(lowercase_, **lowercase_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
snake_case = labels['input_ids']
return input_features
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> Optional[int]:
return self.tokenizer.batch_decode(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
return self.tokenizer.decode(*lowercase_, **lowercase_ )
@contextmanager
def _lowerCamelCase ( self ) -> Union[str, Any]:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
snake_case = True
snake_case = self.tokenizer
yield
snake_case = self.feature_extractor
snake_case = False
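# A minimal usage sketch (comment-only; the checkpoint name is an assumption and
# `Wav2Vec2Processor` is the upstream name of the processor class defined above):
#
#     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#     inputs = processor(audio=raw_audio, sampling_rate=16_000, return_tensors="pt")
#     text = processor.batch_decode(predicted_ids)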
| 332 |
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> list:
if len(A ) == 0:
return []
snake_case , snake_case = min(A ), max(A )
snake_case = int(max_value - min_value ) + 1
snake_case = [[] for _ in range(A )]
for i in my_list:
buckets[int(i - min_value )].append(A )
return [v for bucket in buckets for v in sorted(A )]
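# Example: bucket_sort([0.4, 0.1, 0.3]) -> [0.1, 0.3, 0.4]. One bucket is created
# per unit of the value range and each bucket is sorted with the built-in sort.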
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 332 | 1 |
'''simple docstring'''
import cv2
import numpy as np
class lowerCamelCase :
def __init__( self, lowercase_, lowercase_ ) -> Union[str, Any]:
if k in (0.04, 0.06):
snake_case = k
snake_case = window_size
else:
raise ValueError('invalid k value' )
def __str__( self ) -> str:
return str(self.k )
    def _lowerCamelCase ( self, lowercase_ ) -> tuple[cv2.Mat, list[list[int]]]:
        snake_case = cv2.imread(lowercase_, 0 )
snake_case , snake_case = img.shape
snake_case = []
snake_case = img.copy()
        snake_case = cv2.cvtColor(lowercase_, cv2.COLOR_GRAY2RGB )
snake_case , snake_case = np.gradient(lowercase_ )
snake_case = dx**2
snake_case = dy**2
snake_case = dx * dy
snake_case = 0.04
snake_case = self.window_size // 2
for y in range(lowercase_, h - offset ):
for x in range(lowercase_, w - offset ):
snake_case = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
snake_case = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
snake_case = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
snake_case = (wxx * wyy) - (wxy**2)
snake_case = wxx + wyy
snake_case = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCAmelCase_ = HarrisCorner(0.04, 3)
lowerCAmelCase_ , lowerCAmelCase_ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 332 |
'''simple docstring'''
def __magic_name__ ( A ) -> float:
return 1_0 - x * x
def __magic_name__ ( A , A ) -> float:
    # Bolzano's theorem: a sign change of equation() on [a, b] guarantees a root
    if equation(A ) * equation(A ) >= 0:
        raise ValueError('equation(a) and equation(b) must have opposite signs' )
snake_case = a
while (b - a) >= 0.01:
# Find middle point
snake_case = (a + b) / 2
# Check if middle point is root
if equation(A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(A ) * equation(A ) < 0:
snake_case = c
else:
snake_case = c
return c
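# Worked example: equation(x) = 10 - x * x changes sign on [0, 6] (root at
# sqrt(10) ~ 3.162), so bisection(0, 6) keeps halving the interval until it is
# narrower than 0.01 and returns a value close to 3.16.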
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 332 | 1 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
lowerCAmelCase_ = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
lowerCAmelCase_ = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def __magic_name__ ( ) -> Any:
snake_case = calculate_rouge(A , A , bootstrap_aggregation=A , rouge_keys=['rouge2', 'rougeL'] )
assert isinstance(A , A )
snake_case = calculate_rouge(A , A , bootstrap_aggregation=A , rouge_keys=['rouge2'] )
assert (
pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean()
)
def __magic_name__ ( ) -> Tuple:
snake_case = 'rougeLsum'
snake_case = calculate_rouge(A , A , newline_sep=A , rouge_keys=[k] )[k]
snake_case = calculate_rouge(A , A , newline_sep=A , rouge_keys=[k] )[k]
assert score > score_no_sep
def __magic_name__ ( ) -> List[str]:
snake_case = ['rouge1', 'rouge2', 'rougeL']
snake_case = calculate_rouge(A , A , newline_sep=A , rouge_keys=A )
snake_case = calculate_rouge(A , A , newline_sep=A , rouge_keys=A )
assert score_sep == score_no_sep
def __magic_name__ ( ) -> Optional[Any]:
snake_case = [
'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
]
snake_case = [
'Margot Frank, died in 1945, a month earlier than previously thought.',
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
' the final seconds on board Flight 9525.',
]
assert calculate_rouge(A , A , newline_sep=A ) == calculate_rouge(A , A , newline_sep=A )
def __magic_name__ ( ) -> List[str]:
snake_case = [
'" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
]
snake_case = [
' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
]
snake_case = calculate_rouge(A , A , rouge_keys=['rougeLsum'] , newline_sep=A )['rougeLsum']
snake_case = calculate_rouge(A , A , rouge_keys=['rougeLsum'] )['rougeLsum']
assert new_score > prev_score
def __magic_name__ ( ) -> int:
snake_case = Path('examples/seq2seq/test_data/wmt_en_ro' )
snake_case = calculate_rouge_path(data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) )
assert isinstance(A , A )
snake_case = calculate_rouge_path(
data_dir.joinpath('test.source' ) , data_dir.joinpath('test.target' ) , bootstrap_aggregation=A )
assert isinstance(A , A )
| 332 |
'''simple docstring'''
import pytest
lowerCAmelCase_ = "__dummy_dataset1__"
lowerCAmelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def __magic_name__ ( ) -> List[Any]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __magic_name__ ( ) -> Union[str, Any]:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __magic_name__ ( A , A , A ) -> Optional[int]:
snake_case = dataset_loading_script_name
snake_case = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=A )
snake_case = script_dir / F'''{script_name}.py'''
with open(A , 'w' ) as f:
f.write(A )
return str(A )
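# Usage sketch inside a test (comment-only; the fixture argument name shown is
# hypothetical, and pytest injects fixtures by name):
#
#     def test_dummy_dataset(dataset_loading_script_dir):
#         from datasets import load_dataset
#         ds = load_dataset(dataset_loading_script_dir)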
| 332 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def __magic_name__ ( A ) -> int:
    snake_case = boto3.client('iam' )
snake_case = {
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=A , AssumeRolePolicyDocument=json.dumps(A , indent=2 ) )
snake_case = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=A , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(A , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def __magic_name__ ( A ) -> Any:
    snake_case = boto3.client('iam' )
return iam_client.get_role(RoleName=A )["Role"]["Arn"]
def __magic_name__ ( ) -> List[str]:
snake_case = _ask_options(
'How do you want to authorize?' , ['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] , A , )
snake_case = None
if credentials_configuration == 0:
snake_case = _ask_field('Enter your AWS Profile name: [default] ' , default='default' )
snake_case = aws_profile
else:
print(
'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'
'`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
snake_case = _ask_field('AWS Access Key ID: ' )
snake_case = aws_access_key_id
snake_case = _ask_field('AWS Secret Access Key: ' )
snake_case = aws_secret_access_key
snake_case = _ask_field('Enter your AWS Region: [us-east-1]' , default='us-east-1' )
snake_case = aws_region
snake_case = _ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' , ['Provide IAM Role name', 'Create new IAM role using credentials'] , A , )
if role_management == 0:
snake_case = _ask_field('Enter your IAM role name: ' )
else:
snake_case = 'accelerate_sagemaker_execution_role'
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(A )
snake_case = _ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
snake_case = None
if is_custom_docker_image:
snake_case = _ask_field('Enter your Docker image: ' , lambda A : str(A ).lower() )
snake_case = _ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
snake_case = None
if is_sagemaker_inputs_enabled:
snake_case = _ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' , lambda A : str(A ).lower() , )
snake_case = _ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
snake_case = None
if is_sagemaker_metrics_enabled:
snake_case = _ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' , lambda A : str(A ).lower() , )
snake_case = _ask_options(
'What is the distributed mode?' , ['No distributed training', 'Data parallelism'] , _convert_sagemaker_distributed_mode , )
snake_case = {}
snake_case = _ask_field(
'Do you wish to optimize your script with torch dynamo? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
if use_dynamo:
snake_case = 'dynamo_'
snake_case = _ask_options(
'Which dynamo backend would you like to use?' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
snake_case = _ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
if use_custom_options:
snake_case = _ask_options(
'Which mode do you want to use?' , A , lambda A : TORCH_DYNAMO_MODES[int(A )] , default='default' , )
snake_case = _ask_field(
'Do you want fullgraph mode, or is it OK to break the model into several subgraphs? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
snake_case = _ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' , _convert_yes_no_to_bool , default=A , error_message='Please enter yes or no.' , )
snake_case = 'Which EC2 instance type do you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
snake_case = _ask_options(
A , A , lambda A : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A )] )
else:
eca_instance_query += " [ml.p3.2xlarge]:"
snake_case = _ask_field(A , lambda A : str(A ).lower() , default='ml.p3.2xlarge' )
snake_case = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
snake_case = _ask_field(
'How many machines do you want to use? [1]: ' , A , default=1 , )
snake_case = _ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' , ['no', 'fp16', 'bf16', 'fp8'] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=A , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=A , use_cpu=A , dynamo_config=A , eca_instance_type=A , profile=A , region=A , iam_role_name=A , mixed_precision=A , num_machines=A , sagemaker_inputs_file=A , sagemaker_metrics_file=A , )
| 332 |
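The row above wires an interactive questionnaire around boto3 ('botoa' after this dataset's renaming) to create a SageMaker execution role. Below is a minimal, standard-library-only sketch of the two policy documents involved; the create_role/put_role_policy calls and the role name are omitted, so nothing here talks to AWS:

import json

def build_sagemaker_policies():
    # Trust policy: lets the SageMaker service assume the role.
    trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Principal": {"Service": "sagemaker.amazonaws.com"},
                "Action": "sts:AssumeRole",
            }
        ],
    }
    # Inline permissions policy, trimmed to a few representative actions.
    permissions_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": [
                    "sagemaker:*",
                    "ecr:GetAuthorizationToken",
                    "logs:PutLogEvents",
                    "s3:GetObject",
                    "s3:PutObject",
                ],
                "Resource": "*",
            }
        ],
    }
    return json.dumps(trust_policy, indent=2), json.dumps(permissions_policy, indent=2)

trust_doc, permissions_doc = build_sagemaker_policies()
print(trust_doc)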
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase_ = Lock()
def __magic_name__ ( A , A , A , A , A , A , A ) -> Any:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 1_0 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(A )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
snake_case = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
snake_case = min(A , A )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(A )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
snake_case = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
snake_case = max(A , A )
# after all swaps are performed, send the values back to main
result_pipe[1].send(A )
def __magic_name__ ( A ) -> str:
snake_case = []
snake_case = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
snake_case = temp_rs
snake_case = temp_rr
for i in range(1 , len(A ) - 1 ):
snake_case = Pipe()
snake_case = Pipe()
process_array_.append(
Process(
target=A , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
snake_case = temp_rs
snake_case = temp_rr
process_array_.append(
Process(
target=A , args=(
len(A ) - 1,
arr[len(A ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(A ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(A ) ):
snake_case = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def __magic_name__ ( ) -> Tuple:
snake_case = list(range(1_0 , 0 , -1 ) )
print('Initial List' )
print(*A )
snake_case = odd_even_transposition(A )
print('Sorted List\n' )
print(*A )
if __name__ == "__main__":
main()
| 332 | 1 |
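The row above runs odd-even transposition sort with one OS process per element and pipes between neighbors. A minimal single-process sketch of the same compare-exchange schedule (function name and test values are illustrative):

def odd_even_transposition(arr):
    # n phases guarantee a sorted list: even phases compare-exchange pairs
    # starting at index 0, odd phases start at index 1.
    n = len(arr)
    for phase in range(n):
        start = phase % 2
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition(list(range(10, 0, -1))) == list(range(1, 11))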
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __magic_name__ ( ) -> Optional[int]:
raise RuntimeError('CUDA out of memory.' )
class lowerCamelCase ( nn.Module ):
def __init__( self ) -> Optional[int]:
super().__init__()
snake_case = nn.Linear(3, 4 )
snake_case = nn.BatchNormad(4 )
snake_case = nn.Linear(4, 5 )
def _lowerCamelCase ( self, lowercase_ ) -> str:
return self.lineara(self.batchnorm(self.lineara(lowercase_ ) ) )
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Tuple:
snake_case = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase_ ):
nonlocal batch_sizes
batch_sizes.append(lowercase_ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowercase_, [128, 64, 32, 16, 8] )
def _lowerCamelCase ( self ) -> Tuple:
snake_case = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase_, lowercase_ ):
nonlocal batch_sizes
batch_sizes.append(lowercase_ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
snake_case , snake_case = mock_training_loop_function('hello' )
self.assertListEqual(lowercase_, [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga], [8, 'hello'] )
def _lowerCamelCase ( self ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowercase_ ):
pass
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0] )
def _lowerCamelCase ( self ) -> Tuple:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase_ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0] )
def _lowerCamelCase ( self ) -> Any:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase_, lowercase_, lowercase_ ):
if batch_size != 8:
raise_fake_out_of_memory()
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function(128, 'hello', 'world' )
self.assertIn('Batch size was passed into `f`', cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')', cm.exception.args[0] )
def _lowerCamelCase ( self ) -> int:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase_ ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!', cm.exception.args[0] )
@require_cuda
def _lowerCamelCase ( self ) -> int:
snake_case = torch.cuda.memory_allocated()
snake_case = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated(), lowercase_ )
snake_case = release_memory(lowercase_ )
self.assertEqual(torch.cuda.memory_allocated(), lowercase_ )
| 332 |
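The tests above exercise Accelerate's find_executable_batch_size decorator. A from-scratch sketch of the halving idea, not the library's actual implementation, looks roughly like this:

import functools

def find_executable_batch_size(starting_batch_size=128):
    # Retry the wrapped function, halving `batch_size` whenever it raises an
    # out-of-memory-style RuntimeError, until it succeeds or reaches zero.
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:
                    if "out of memory" not in str(err):
                        raise
                    batch_size //= 2
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    if batch_size > 8:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

assert train() == 8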
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> None:
create_state_space_tree(A , [] , 0 , [0 for i in range(len(A ) )] )
def __magic_name__ ( A , A , A , A , ) -> None:
if index == len(A ):
print(A )
return
for i in range(len(A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
snake_case = True
create_state_space_tree(A , A , index + 1 , A )
current_sequence.pop()
snake_case = False
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase_ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 332 | 1 |
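The row above generates permutations by backtracking over an index_used bitmap. The same idea sketched standalone and cross-checked against itertools.permutations:

from itertools import permutations

def generate_all_permutations(sequence):
    # Collect permutations via the same index-used backtracking idea.
    result, current, used = [], [], [False] * len(sequence)

    def backtrack():
        if len(current) == len(sequence):
            result.append(tuple(current))
            return
        for i, item in enumerate(sequence):
            if not used[i]:
                used[i] = True
                current.append(item)
                backtrack()
                current.pop()
                used[i] = False

    backtrack()
    return result

assert sorted(generate_all_permutations([3, 1, 2])) == sorted(permutations([3, 1, 2]))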
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A ) -> bool:
return len(set(A ) ) == len(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 332 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''roberta'''
def __init__( self, lowercase_=50265, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=1, lowercase_=0, lowercase_=2, lowercase_="absolute", lowercase_=True, lowercase_=None, **lowercase_, ) -> Tuple:
super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, **lowercase_ )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class lowerCamelCase ( __lowerCAmelCase ):
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 332 | 1 |
'''simple docstring'''
import os
from pathlib import Path
def __magic_name__ ( ) -> str:
from torch.utils.cpp_extension import load
snake_case = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
snake_case = [
root / filename
for filename in [
'vision.cpp',
os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
]
]
load(
'MultiScaleDeformableAttention' , A , with_cuda=A , extra_include_paths=[str(A )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 332 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**lowercase_ )
snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case = 'post_processor'
snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
if tokenizer_component_instance:
snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case = tuple(state['sep'] )
if "cls" in state:
snake_case = tuple(state['cls'] )
snake_case = False
if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = add_prefix_space
snake_case = True
if state.get('trim_offsets', lowercase_ ) != trim_offsets:
snake_case = trim_offsets
snake_case = True
if changes_to_apply:
snake_case = getattr(lowercase_, state.pop('type' ) )
snake_case = component_class(**lowercase_ )
setattr(self.backend_tokenizer, lowercase_, lowercase_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowerCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self, lowercase_ ) -> Any:
snake_case = AddedToken(lowercase_, lstrip=lowercase_, rstrip=lowercase_ ) if isinstance(lowercase_, lowercase_ ) else value
snake_case = value
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, *lowercase_, **lowercase_ ) -> BatchEncoding:
snake_case = kwargs.get('is_split_into_words', lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> Tuple[str]:
snake_case = self._tokenizer.model.save(lowercase_, name=lowercase_ )
return tuple(lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_=None ) -> Dict:
snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None ) -> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = PaddingStrategy.DO_NOT_PAD, lowercase_ = None, lowercase_ = None, ) -> dict:
snake_case = super()._pad(
encoded_inputs=lowercase_, max_length=lowercase_, padding_strategy=lowercase_, pad_to_multiple_of=lowercase_, return_attention_mask=lowercase_, )
# Load from model defaults
if return_attention_mask is None:
snake_case = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as the other (sequential) inputs.
snake_case = len(encoded_inputs['global_attention_mask'] ) != len(lowercase_ )
if needs_to_be_padded:
snake_case = len(lowercase_ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
snake_case = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 332 | 1 |
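The _pad override above extends global_attention_mask with -1 rather than 0, because 0 already encodes 'local attention'. A tiny standalone sketch of that padding rule (names are illustrative):

def pad_global_attention_mask(mask, target_len, padding_side="right"):
    # -1 is the pad value: 0 already means "local attention", so it
    # cannot double as "do not attend".
    diff = target_len - len(mask)
    if diff <= 0:
        return mask
    return mask + [-1] * diff if padding_side == "right" else [-1] * diff + mask

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0, 0], 5, padding_side="left") == [-1, -1, 1, 0, 0]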
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 332 |
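The row above is the lazy-import shim pattern: heavy submodules are imported only when their names are first accessed. A minimal sketch of the same effect using module-level __getattr__ (PEP 562) rather than the _LazyModule class; the package layout is hypothetical:

# Hypothetical mypackage/__init__.py
import importlib

_import_structure = {
    "configuration_resnet": ["ResNetConfig"],
    "modeling_resnet": ["ResNetModel"],
}
# Reverse map: public name -> submodule that defines it.
_name_to_module = {
    name: module for module, names in _import_structure.items() for name in names
}

def __getattr__(name):
    # Called only when `name` is not found normally: import the defining
    # submodule on first access, then hand back the attribute.
    module_name = _name_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    submodule = importlib.import_module(f".{module_name}", __name__)
    return getattr(submodule, name)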
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __magic_name__ ( A ) -> Tuple:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __magic_name__ ( A , A ) -> Optional[int]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __magic_name__ ( A ) -> List[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') )
return token
def __magic_name__ ( ) -> Dict:
snake_case = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __magic_name__ ( A , A , A , A ) -> int:
snake_case = 'imagenet-1k-id2label.json'
snake_case = 1_0_0_0
snake_case = 'huggingface/label-files'
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(A , A , repo_type='dataset' ) ) , 'r' ) )
snake_case = {int(A ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=A , idalabel=A , labelaid=A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 1_0]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 1_6]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
snake_case = [2, 2, 2_0]
snake_case = [3, 1_2, 1_6]
snake_case = [1_9_2, 7_6_8, 1_0_2_4]
snake_case = CvtForImageClassification(A )
snake_case = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
snake_case = image_size
snake_case = torch.load(A , map_location=torch.device('cpu' ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(A )
snake_case = list_of_state_dict + embeddings(A )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(A , A )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(A )
for i in range(len(A ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_8_4,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCAmelCase_ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 332 | 1 |
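Checkpoint-conversion scripts like the one above boil down to building (new_key, old_key) pairs and copying tensors across. A minimal sketch of that renaming pass over a plain dict, with illustrative keys and values:

from collections import OrderedDict

def rename_state_dict(original, rename_pairs):
    # rename_pairs holds (new_key, old_key) tuples, matching the lists
    # built by the embeddings/attention/cls_token/head helpers above.
    renamed = OrderedDict()
    for new_key, old_key in rename_pairs:
        renamed[new_key] = original[old_key]
    return renamed

weights = {"stage0.patch_embed.proj.weight": [[0.1, 0.2]]}
pairs = [
    (
        "cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
        "stage0.patch_embed.proj.weight",
    )
]
converted = rename_state_dict(weights, pairs)
assert list(converted) == [pairs[0][0]]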
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCAmelCase )
class lowerCamelCase ( __lowerCAmelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
snake_case_ = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
snake_case_ = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
snake_case_ = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
snake_case_ = "question"
snake_case_ = "context"
snake_case_ = "answers"
@property
def _lowerCamelCase ( self ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 332 |
'''simple docstring'''
from pathlib import Path
import fire
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
snake_case = Path(A )
snake_case = Path(A )
dest_dir.mkdir(exist_ok=A )
for path in src_dir.iterdir():
snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
snake_case = dest_dir.joinpath(path.name )
print(A )
dest_path.open('w' ).write('\n'.join(A ) )
if __name__ == "__main__":
fire.Fire(minify)
| 332 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase ( __lowerCAmelCase , unittest.TestCase ):
snake_case_ = FunnelTokenizer
snake_case_ = FunnelTokenizerFast
snake_case_ = True
snake_case_ = True
def _lowerCamelCase ( self ) -> str:
super().setUp()
snake_case = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
snake_case = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _lowerCamelCase ( self, **lowercase_ ) -> Tuple:
return FunnelTokenizer.from_pretrained(self.tmpdirname, **lowercase_ )
def _lowerCamelCase ( self, **lowercase_ ) -> Optional[int]:
return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **lowercase_ )
def _lowerCamelCase ( self, lowercase_ ) -> Any:
snake_case = 'UNwant\u00E9d,running'
snake_case = 'unwanted, running'
return input_text, output_text
def _lowerCamelCase ( self ) -> Dict:
snake_case = self.tokenizer_class(self.vocab_file )
snake_case = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowercase_, ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ), [7, 4, 5, 10, 8, 9] )
def _lowerCamelCase ( self ) -> Any:
snake_case = self.get_tokenizers(do_lower_case=lowercase_ )
for tokenizer in tokenizers:
snake_case = tokenizer('UNwant\u00E9d,running' )
snake_case = len(inputs['input_ids'] ) - 1
self.assertListEqual(inputs['token_type_ids'], [2] + [0] * sentence_len )
snake_case = tokenizer('UNwant\u00E9d,running', 'UNwant\u00E9d,running' )
self.assertListEqual(inputs['token_type_ids'], [2] + [0] * sentence_len + [1] * sentence_len )
| 332 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __magic_name__ ( A , A ) -> Union[str, Any]:
inspect_dataset(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __magic_name__ ( A , A ) -> int:
inspect_metric(A , A )
snake_case = path + '.py'
assert script_name in os.listdir(A )
assert "__pycache__" not in os.listdir(A )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_config_info(A , config_name=A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> Any:
with pytest.raises(A ):
get_dataset_config_info(A , config_name=A )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __magic_name__ ( A , A ) -> Dict:
snake_case = get_dataset_config_names(A )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> List[str]:
snake_case = get_dataset_infos(A )
assert list(infos.keys() ) == expected_configs
snake_case = expected_configs[0]
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __magic_name__ ( A , A , A ) -> Any:
snake_case = get_dataset_infos(A )
assert expected_config in infos
snake_case = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __magic_name__ ( A , A , A ) -> int:
with pytest.raises(A ):
get_dataset_split_names(A , config_name=A )
| 332 | 1 |
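The tests above cover the datasets inspection helpers. Assuming network access to the Hugging Face Hub, typical usage looks like this (the outputs shown in comments are indicative, not guaranteed):

from datasets import get_dataset_config_names, get_dataset_split_names

# Requires network access to the Hugging Face Hub.
configs = get_dataset_config_names("paws")
print(configs)  # expected to include 'labeled_final', per the tests above
print(get_dataset_split_names("paws", config_name="labeled_final"))
# expected: ['train', 'test', 'validation']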
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' )
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ) -> List[str]:
snake_case = tempfile.mkdtemp()
return TatoebaConverter(save_dir=lowercase_ )
@slow
def _lowerCamelCase ( self ) -> List[Any]:
self.resolver.convert_models(['heb-eng'] )
@slow
def _lowerCamelCase ( self ) -> List[Any]:
snake_case , snake_case = self.resolver.write_model_card('opus-mt-he-en', dry_run=lowercase_ )
assert mmeta["long_pair"] == "heb-eng"
| 332 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger("transformers.models.encodec")
lowerCAmelCase_ = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
lowerCAmelCase_ = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
lowerCAmelCase_ = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
lowerCAmelCase_ = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
lowerCAmelCase_ = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
lowerCAmelCase_ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCAmelCase_ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowerCAmelCase_ = []
lowerCAmelCase_ = []
def __magic_name__ ( A , A , A , A , A ) -> int:
for attribute in key.split('.' ):
snake_case = getattr(A , A )
if weight_type is not None:
snake_case = getattr(A , A ).shape
else:
snake_case = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + ("." + weight_type if weight_type is not None else "")} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
elif weight_type == "running_mean":
snake_case = value
elif weight_type == "running_var":
snake_case = value
elif weight_type == "num_batches_tracked":
snake_case = value
elif weight_type == "weight_ih_l0":
snake_case = value
elif weight_type == "weight_hh_l0":
snake_case = value
elif weight_type == "bias_ih_l0":
snake_case = value
elif weight_type == "bias_hh_l0":
snake_case = value
elif weight_type == "weight_ih_l1":
snake_case = value
elif weight_type == "weight_hh_l1":
snake_case = value
elif weight_type == "bias_ih_l1":
snake_case = value
elif weight_type == "bias_hh_l1":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def __magic_name__ ( A , A ) -> str:
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
snake_case , snake_case = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __magic_name__ ( A , A , A ) -> Union[str, Any]:
snake_case = []
if model_name in ("encodec_24khz", "encodec_32khz"):
snake_case = MAPPING_24K
elif model_name == "encodec_48khz":
snake_case = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(A , A ):
logger.info(F'''{name} was ignored''' )
continue
snake_case = False
for key, mapped_key in MAPPING.items():
if "*" in key:
snake_case , snake_case = key.split('.*.' )
if prefix in name and suffix in name:
snake_case = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
snake_case = True
if "*" in mapped_key:
snake_case = name.split(A )[0].split('.' )[-2]
snake_case = mapped_key.replace('*' , A )
if "weight_g" in name:
snake_case = 'weight_g'
elif "weight_v" in name:
snake_case = 'weight_v'
elif "weight_ih_l0" in name:
snake_case = 'weight_ih_l0'
elif "weight_hh_l0" in name:
snake_case = 'weight_hh_l0'
elif "bias_ih_l0" in name:
snake_case = 'bias_ih_l0'
elif "bias_hh_l0" in name:
snake_case = 'bias_hh_l0'
elif "weight_ih_l1" in name:
snake_case = 'weight_ih_l1'
elif "weight_hh_l1" in name:
snake_case = 'weight_hh_l1'
elif "bias_ih_l1" in name:
snake_case = 'bias_ih_l1'
elif "bias_hh_l1" in name:
snake_case = 'bias_hh_l1'
elif "bias" in name:
snake_case = 'bias'
elif "weight" in name:
snake_case = 'weight'
elif "running_mean" in name:
snake_case = 'running_mean'
elif "running_var" in name:
snake_case = 'running_var'
elif "num_batches_tracked" in name:
snake_case = 'num_batches_tracked'
else:
snake_case = None
set_recursively(A , A , A , A , A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def __magic_name__ ( A , A , A , A=None , A=None , ) -> List[str]:
if config_path is not None:
snake_case = EncodecConfig.from_pretrained(A )
else:
snake_case = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
snake_case = [8, 5, 4, 4]
snake_case = [2.2]
snake_case = 6_4
snake_case = 3_2_0_0_0
snake_case = 2_0_4_8
snake_case = False
snake_case = False
snake_case = False
elif model_name == "encodec_48khz":
snake_case = [8, 5, 4, 2]
snake_case = [3.0, 6.0, 12.0, 24.0]
snake_case = 4_8_0_0_0
snake_case = 2
snake_case = False
snake_case = 'time_group_norm'
snake_case = True
snake_case = 1.0
snake_case = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
snake_case = EncodecModel(A )
snake_case = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(A )
snake_case = torch.load(A )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
snake_case = original_checkpoint['best_state']
recursively_load_weights(A , A , A )
model.save_pretrained(A )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(A )
model.push_to_hub(A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
lowerCAmelCase_ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 332 |
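The weight-conversion loop above matches original parameter names against mapping keys that may contain '*' wildcards. A standalone sketch of just that matching rule:

def matches_mapping_key(name: str, key: str) -> bool:
    # Same rules as the loop above: a trailing ".*" matches by prefix,
    # ".*." matches one wildcard segment via prefix/suffix containment,
    # and anything else matches by plain substring containment.
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name

assert matches_mapping_key(
    "quantizer.vq.layers.3._codebook.embed",
    "quantizer.vq.layers.*._codebook.embed",
)
assert not matches_mapping_key("decoder.model.0.conv.conv", "encoder.model.*")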
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ = False
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 12
@property
def _lowerCamelCase ( self ) -> Dict:
return 12
@property
def _lowerCamelCase ( self ) -> List[Any]:
return 32
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
snake_case = VQModel(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3, )
return model
@property
def _lowerCamelCase ( self ) -> List[Any]:
snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowerCamelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModel(lowercase_ )
@property
def _lowerCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case = 12
snake_case = 12
snake_case = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
snake_case = TransformeraDModel(**lowercase_ )
return model
def _lowerCamelCase ( self ) -> Tuple:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=lowercase_ )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_551, 0.6_168, 0.5_008, 0.5_676, 0.5_659, 0.4_295, 0.6_073, 0.5_599, 0.4_992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self ) -> Optional[Any]:
snake_case = 'cpu'
snake_case = self.dummy_vqvae
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_transformer
snake_case = VQDiffusionScheduler(self.num_embed )
snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowercase_, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length )
snake_case = VQDiffusionPipeline(
vqvae=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, transformer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = 'teddy bear playing in the pool'
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe([prompt], generator=lowercase_, num_inference_steps=2, output_type='np' )
snake_case = output.images
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipe(
[prompt], generator=lowercase_, output_type='np', return_dict=lowercase_, num_inference_steps=2 )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case = np.array([0.6_693, 0.6_075, 0.4_959, 0.5_701, 0.5_583, 0.4_333, 0.6_171, 0.5_684, 0.4_988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> str:
snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
snake_case = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
snake_case = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case = torch.Generator(device=lowercase_ ).manual_seed(0 )
snake_case = pipeline(
'teddy bear playing in the pool', num_images_per_prompt=1, generator=lowercase_, output_type='np', )
snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 332 | 1 |
'''simple docstring'''
import numpy as np
def __magic_name__ ( vector ) -> np.ndarray:
    """
    Implements the hyperbolic tangent activation: tanh(x) = 2 / (1 + e^(-2x)) - 1.

    >>> __magic_name__(np.array([0.0]))
    array([0.])
    """
    return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
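
    # A minimal cross-check with illustrative values: the rational form above
    # is algebraically identical to np.tanh, so the two should agree to
    # floating-point precision.
    sample = np.array([-1.0, 0.0, 1.0])
    assert np.allclose(__magic_name__(sample), np.tanh(sample))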
| 332 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=__lowerCAmelCase ):
snake_case_ = ['''note_seq''']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
requires_backends(self, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
requires_backends(cls, ['note_seq'] )
@classmethod
def _lowerCamelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
requires_backends(cls, ['note_seq'] )
| 332 | 1 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
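
# A hedged alternative sketch: both sums have closed forms, so the same answer
# falls out of (n(n + 1)/2)^2 - n(n + 1)(2n + 1)/6 in O(1) time.
# "solution_closed_form" is a name introduced here for illustration only.
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares


assert solution_closed_form() == solution()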
if __name__ == "__main__":
print(f"{solution() = }")
| 332 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, *lowercase_, **lowercase_ ) -> None:
warnings.warn(
'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DPTImageProcessor instead.', lowercase_, )
super().__init__(*lowercase_, **lowercase_ )
| 332 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = ['''pixel_values''']
def __init__( self, lowercase_ = True, lowercase_ = None, lowercase_ = PILImageResampling.BICUBIC, lowercase_ = True, lowercase_ = None, lowercase_ = True, lowercase_ = 1 / 255, lowercase_ = True, lowercase_ = None, lowercase_ = None, lowercase_ = True, **lowercase_, ) -> None:
super().__init__(**lowercase_ )
snake_case = size if size is not None else {'shortest_edge': 224}
snake_case = get_size_dict(lowercase_, default_to_square=lowercase_ )
snake_case = crop_size if crop_size is not None else {'height': 224, 'width': 224}
snake_case = get_size_dict(lowercase_, default_to_square=lowercase_, param_name='crop_size' )
snake_case = do_resize
snake_case = size
snake_case = resample
snake_case = do_center_crop
snake_case = crop_size
snake_case = do_rescale
snake_case = rescale_factor
snake_case = do_normalize
snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case = do_convert_rgb
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ = PILImageResampling.BICUBIC, lowercase_ = None, **lowercase_, ) -> np.ndarray:
snake_case = get_size_dict(lowercase_, default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
snake_case = get_resize_output_image_size(lowercase_, size=size['shortest_edge'], default_to_square=lowercase_ )
return resize(lowercase_, size=lowercase_, resample=lowercase_, data_format=lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ = None, **lowercase_, ) -> np.ndarray:
snake_case = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowercase_, size=(size['height'], size['width']), data_format=lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_ = None, **lowercase_, ) -> Union[str, Any]:
return rescale(lowercase_, scale=lowercase_, data_format=lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_ = None, **lowercase_, ) -> np.ndarray:
return normalize(lowercase_, mean=lowercase_, std=lowercase_, data_format=lowercase_, **lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = None, lowercase_ = ChannelDimension.FIRST, **lowercase_, ) -> PIL.Image.Image:
snake_case = do_resize if do_resize is not None else self.do_resize
snake_case = size if size is not None else self.size
snake_case = get_size_dict(lowercase_, param_name='size', default_to_square=lowercase_ )
snake_case = resample if resample is not None else self.resample
snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case = crop_size if crop_size is not None else self.crop_size
snake_case = get_size_dict(lowercase_, param_name='crop_size', default_to_square=lowercase_ )
snake_case = do_rescale if do_rescale is not None else self.do_rescale
snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case = do_normalize if do_normalize is not None else self.do_normalize
snake_case = image_mean if image_mean is not None else self.image_mean
snake_case = image_std if image_std is not None else self.image_std
snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case = [convert_to_rgb(lowercase_ ) for image in images]
# All transformations expect numpy arrays.
snake_case = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
snake_case = [self.resize(image=lowercase_, size=lowercase_, resample=lowercase_ ) for image in images]
if do_center_crop:
snake_case = [self.center_crop(image=lowercase_, size=lowercase_ ) for image in images]
if do_rescale:
snake_case = [self.rescale(image=lowercase_, scale=lowercase_ ) for image in images]
if do_normalize:
snake_case = [self.normalize(image=lowercase_, mean=lowercase_, std=lowercase_ ) for image in images]
snake_case = [to_channel_dimension_format(lowercase_, lowercase_ ) for image in images]
snake_case = {'pixel_values': images}
return BatchFeature(data=lowercase_, tensor_type=lowercase_ )
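
# A hedged usage sketch via the equivalent public class,
# transformers.CLIPImageProcessor, which the processor above mirrors; the
# dummy image size and color are arbitrary.
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPImageProcessor

    clip_processor = CLIPImageProcessor()
    dummy_image = Image.new("RGB", (640, 480), color=(128, 64, 32))
    batch = clip_processor(dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)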
| 332 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    plt.scatter(X, y, color='red' )
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X ) ), color='blue' )
    plt.title('Truth or Bluff (Linear Regression)' )
    plt.xlabel('Position level' )
    plt.ylabel('Salary' )
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 332 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = ''''''
snake_case_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
snake_case_ = None # compression type in fsspec. ex: "gzip"
snake_case_ = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
super().__init__(self, **lowercase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case = fsspec.open(
lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
snake_case = os.path.basename(self.file.path.split('::' )[0] )
snake_case = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
snake_case = None
@classmethod
def _lowerCamelCase ( cls, lowercase_ ) -> Any:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowercase_ ).lstrip('/' )
def _lowerCamelCase ( self ) -> Optional[Any]:
if self.dir_cache is None:
f = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
self.dir_cache = {f['name']: f}
def _lowerCamelCase ( self, lowercase_ ) -> str:
return self.file.open().read()
def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
snake_case = self._strip_protocol(lowercase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''bz2'''
snake_case_ = '''bz2'''
snake_case_ = '''.bz2'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''gzip'''
snake_case_ = '''gzip'''
snake_case_ = '''.gz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''lz4'''
snake_case_ = '''lz4'''
snake_case_ = '''.lz4'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''xz'''
snake_case_ = '''xz'''
snake_case_ = '''.xz'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''zstd'''
snake_case_ = '''zstd'''
snake_case_ = '''.zst'''
def __init__( self, lowercase_, lowercase_ = "rb", lowercase_ = None, lowercase_ = None, lowercase_ = DEFAULT_BLOCK_SIZE, **lowercase_, ) -> Union[str, Any]:
super().__init__(
fo=lowercase_, mode=lowercase_, target_protocol=lowercase_, target_options=lowercase_, block_size=lowercase_, **lowercase_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_enter = self.file.__enter__
class WrappedFile:
def __init__( self, lowercase_ ) -> List[Any]:
snake_case = file_
def __enter__( self ) -> Dict:
self._file.__enter__()
return self
def __exit__( self, *lowercase_, **lowercase_ ) -> Dict:
self._file.__exit__(*lowercase_, **lowercase_ )
def __iter__( self ) -> List[str]:
return iter(self._file )
def _lowerCamelCase ( self ) -> List[str]:
return next(self._file )
def __getattr__( self, lowercase_ ) -> List[Any]:
return getattr(self._file, lowercase_ )
def fixed_enter(*lowercase_, **lowercase_ ):
return WrappedFile(_enter(*lowercase_, **lowercase_ ) )
self.file.__enter__ = fixed_enter
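
# A hedged usage sketch of what these filesystems enable: fsspec can open a
# compressed file and decompress it transparently. This uses core fsspec's
# `compression=` argument; "/tmp/data.txt.gz" is a placeholder path.
if __name__ == "__main__":
    with fsspec.open("/tmp/data.txt.gz", mode="rt", compression="gzip") as f:
        print(f.read())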
| 332 | 1 |
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + '/today' ).json()
def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 332 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Find the maximum of nums[left:right + 1] by divide and conquer.

    >>> find_max([3, 2, 1], 0, 2)
    3
    """
    if len(nums ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
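
    # A quick cross-check with illustrative values: the divide-and-conquer
    # search must agree with Python's built-in max().
    nums = [2, 8, 1, 10, 5]
    assert find_max(nums, 0, len(nums) - 1) == max(nums)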
| 332 | 1 |
'''simple docstring'''
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string):
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
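
    # A deterministic check with illustrative values; counting sort runs in
    # O(n + k), where k is the range of values, and the sort is stable.
    assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]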
| 332 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
class lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
@register_to_config
def __init__( self, lowercase_ = 3, lowercase_ = 3, lowercase_ = ("DownEncoderBlock2D",), lowercase_ = ("UpDecoderBlock2D",), lowercase_ = (64,), lowercase_ = 1, lowercase_ = "silu", lowercase_ = 3, lowercase_ = 32, lowercase_ = 256, lowercase_ = 32, lowercase_ = None, lowercase_ = 0.18_215, lowercase_ = "group", ) -> str:
super().__init__()
# pass init params to Encoder
snake_case = Encoder(
in_channels=lowercase_, out_channels=lowercase_, down_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, double_z=lowercase_, )
snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels
snake_case = nn.Conv2d(lowercase_, lowercase_, 1 )
snake_case = VectorQuantizer(lowercase_, lowercase_, beta=0.25, remap=lowercase_, sane_index_shape=lowercase_ )
snake_case = nn.Conv2d(lowercase_, lowercase_, 1 )
# pass init params to Decoder
snake_case = Decoder(
in_channels=lowercase_, out_channels=lowercase_, up_block_types=lowercase_, block_out_channels=lowercase_, layers_per_block=lowercase_, act_fn=lowercase_, norm_num_groups=lowercase_, norm_type=lowercase_, )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> VQEncoderOutput:
snake_case = self.encoder(lowercase_ )
snake_case = self.quant_conv(lowercase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase_ )
@apply_forward_hook
def _lowerCamelCase ( self, lowercase_, lowercase_ = False, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
snake_case , snake_case , snake_case = self.quantize(lowercase_ )
else:
snake_case = h
snake_case = self.post_quant_conv(lowercase_ )
snake_case = self.decoder(lowercase_, quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
def _lowerCamelCase ( self, lowercase_, lowercase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case = sample
snake_case = self.encode(lowercase_ ).latents
snake_case = self.decode(lowercase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_ )
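
# A hedged round-trip sketch using diffusers' public VQModel, which the class
# above mirrors; the tiny config and the input shape are illustrative only.
if __name__ == "__main__":
    from diffusers import VQModel

    tiny_vq = VQModel(block_out_channels=(32,), norm_num_groups=16, latent_channels=4, num_vq_embeddings=32)
    sample = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        latents = tiny_vq.encode(sample).latents
        reconstruction = tiny_vq.decode(latents).sample
    print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])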
| 332 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 332 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
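
# A hedged sanity check: an a x b grid contains T(a) * T(b) rectangles, where
# T(n) = n(n + 1)/2 is the n-th triangle number; the 2 x 3 case yields the 18
# rectangles quoted in the problem statement. "_rectangle_count" is a helper
# introduced here for illustration only.
def _rectangle_count(a: int, b: int) -> int:
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)


assert _rectangle_count(2, 3) == 18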
if __name__ == "__main__":
print(f"{solution() = }")
| 332 | 1 |
'''simple docstring'''
import os
def largest_product(grid):
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product

    return largest
def solution():
    grid = []
    with open(os.path.dirname(__file__ ) + '/grid.txt' ) as file:
        for line in file:
            grid.append(line.strip('\n' ).split(' ' ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
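
# A hedged check with a hand-crafted 4 x 4 grid whose main diagonal
# (1 * 2 * 3 * 4 = 24) beats every other line of four.
_demo_grid = [
    [1, 1, 1, 1],
    [1, 2, 1, 1],
    [1, 1, 3, 1],
    [1, 1, 1, 4],
]
assert largest_product(_demo_grid) == 24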
if __name__ == "__main__":
print(solution())
| 332 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 332 | 1 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
lowerCAmelCase_ = False
try:
lowerCAmelCase_ = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class lowerCamelCase :
def __init__( self, lowercase_ = None, lowercase_ = [] ) -> Union[str, Any]:
snake_case = 0
snake_case = choices
snake_case = prompt
if sys.platform == "win32":
snake_case = '*'
else:
snake_case = '➔ '
def _lowerCamelCase ( self, lowercase_, lowercase_ = "" ) -> Union[str, Any]:
if sys.platform != "win32":
writeColor(self.choices[index], 32, lowercase_ )
else:
forceWrite(self.choices[index], lowercase_ )
def _lowerCamelCase ( self, lowercase_ ) -> Optional[Any]:
if index == self.position:
forceWrite(F''' {self.arrow_char} ''' )
self.write_choice(lowercase_ )
else:
forceWrite(F''' {self.choices[index]}''' )
reset_cursor()
def _lowerCamelCase ( self, lowercase_, lowercase_ = 1 ) -> List[Any]:
snake_case = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(lowercase_ )
move_cursor(lowercase_, direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['up'] )
def _lowerCamelCase ( self ) -> Optional[int]:
self.move_direction(Direction.UP )
@input.mark(KEYMAP['down'] )
def _lowerCamelCase ( self ) -> str:
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['newline'] )
def _lowerCamelCase ( self ) -> Any:
move_cursor(len(self.choices ) - self.position, 'DOWN' )
return self.position
@input.mark(KEYMAP['interrupt'] )
def _lowerCamelCase ( self ) -> Any:
move_cursor(len(self.choices ) - self.position, 'DOWN' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def _lowerCamelCase ( self ) -> Tuple:
index = int(chr(self.current_selection ) )
movement = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP, -movement )
elif self.position < index:
self.move_direction(Direction.DOWN, lowercase_ )
else:
return
else:
return
def _lowerCamelCase ( self, lowercase_ = 0 ) -> Union[str, Any]:
if self.prompt:
linebreak()
forceWrite(self.prompt, '\n' )
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter', '\n' )
else:
forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n' )
snake_case = default_choice
for i in range(len(self.choices ) ):
self.print_choice(lowercase_ )
forceWrite('\n' )
move_cursor(len(self.choices ) - self.position, 'UP' )
with cursor.hide():
while True:
if in_colab:
try:
snake_case = int(builtins.input() )
except ValueError:
snake_case = default_choice
else:
snake_case = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1, 'UP' )
clear_line()
self.write_choice(lowercase_, '\n' )
return choice
| 332 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
snake_case_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
snake_case_ = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
snake_case_ = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
snake_case_ = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''train'''
snake_case_ = '''dev'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
snake_case = args
snake_case = is_language_sensitive
snake_case = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowercase_, lowercase_ ):
try:
snake_case = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
snake_case = mode
# Load data features from cache or dataset file
snake_case = 'v2' if args.version_2_with_negative else 'v1'
snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
start = time.time()
self.old_features = torch.load(cached_features_file )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
self.features = self.old_features['features']
self.dataset = self.old_features.get('dataset', None )
self.examples = self.old_features.get('examples', None )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
if mode == Split.dev:
snake_case = self.processor.get_dev_examples(args.data_dir )
else:
snake_case = self.processor.get_train_examples(args.data_dir )
snake_case , snake_case = squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, )
start = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
snake_case = self.features[i]
snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )
snake_case = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case = torch.tensor(feature.start_position, dtype=torch.long )
snake_case = torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
| 332 | 1 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
res = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file:  # only for knowing the class
    for data in res.iter_content(1_0_0_0_0):
        out_file.write(data)
soup = BeautifulSoup(res.text, "html.parser")
links = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 332 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
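# Example invocation (paths are placeholders):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin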
| 332 | 1 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowerCAmelCase )
class lowerCamelCase ( __lowerCAmelCase ):
def __init__( self, **lowercase_ ) -> Tuple:
super().__init__(**lowercase_ )
if self.framework != "pt":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self, lowercase_, **lowercase_ ) -> List[str]:
return super().__call__(lowercase_, **lowercase_ )
def _lowerCamelCase ( self, **lowercase_ ) -> Tuple:
snake_case = {}
if "candidate_labels" in kwargs:
snake_case = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
snake_case = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def _lowerCamelCase ( self, lowercase_, lowercase_=None, lowercase_="This is a sound of {}." ) -> Union[str, Any]:
if isinstance(lowercase_, lowercase_ ):
if audio.startswith('http://' ) or audio.startswith('https://' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
snake_case = requests.get(lowercase_ ).content
else:
with open(lowercase_, 'rb' ) as f:
snake_case = f.read()
if isinstance(lowercase_, lowercase_ ):
snake_case = ffmpeg_read(lowercase_, self.feature_extractor.sampling_rate )
if not isinstance(lowercase_, np.ndarray ):
raise ValueError('We expect a numpy ndarray as input' )
if len(audio.shape ) != 1:
raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
snake_case = self.feature_extractor(
[audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='pt' )
snake_case = candidate_labels
snake_case = [hypothesis_template.format(lowercase_ ) for x in candidate_labels]
snake_case = self.tokenizer(lowercase_, return_tensors=self.framework, padding=lowercase_ )
snake_case = [text_inputs]
return inputs
def _lowerCamelCase ( self, lowercase_ ) -> Dict:
snake_case = model_inputs.pop('candidate_labels' )
snake_case = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0], lowercase_ ):
snake_case = text_inputs[0]
else:
# Batching case.
snake_case = text_inputs[0][0]
snake_case = self.model(**lowercase_, **lowercase_ )
snake_case = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_audio,
}
return model_outputs
def _lowerCamelCase ( self, lowercase_ ) -> Tuple:
snake_case = model_outputs.pop('candidate_labels' )
snake_case = model_outputs['logits'][0]
if self.framework == "pt":
snake_case = logits.softmax(dim=0 )
snake_case = probs.tolist()
else:
raise ValueError('`tf` framework not supported.' )
snake_case = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(lowercase_, lowercase_ ), key=lambda x : -x[0] )
]
return result
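
# A hedged usage sketch via the public pipeline API; "laion/clap-htsat-unfused"
# is the checkpoint commonly shown in the docs for this task, and "audio.wav"
# is a placeholder path.
if __name__ == "__main__":
    from transformers import pipeline

    audio_classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    predictions = audio_classifier("audio.wav", candidate_labels=["dog barking", "vacuum cleaner"])
    print(predictions)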
| 332 |
'''simple docstring'''
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets: list[list] = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 332 | 1 |