import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
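# Usage sketch: loading the same checkpoint the slow test above exercises, but
# outside the test harness. This assumes a Flax install; the int32 dtype for the
# dummy input ids is an assumption, following common transformers examples.
if __name__ == "__main__":
    from transformers import FlaxRobertaModel

    model = FlaxRobertaModel.from_pretrained("roberta-base")
    outputs = model(np.ones((1, 1), dtype="i4"))
    print(outputs.last_hidden_state.shape)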
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
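# Complexity note: the three recursive calls on 2/3-size ranges satisfy the
# recurrence T(n) = 3 T(2n/3) + O(1), which solves to roughly O(n^2.71) -
# slower than bubble sort. Quick sanity check:
#     stooge_sort([8, 3, 5, 1, 9]) -> [1, 3, 5, 8, 9]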
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()
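# Response format note (an assumption about the ZenQuotes API, not guaranteed
# by this file): each endpoint returns a list of dicts with "q" (quote text)
# and "a" (author) keys, so a single quote can be printed with e.g.
#     quote = random_quotes()[0]
#     print(f'"{quote["q"]}" - {quote["a"]}')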
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(10)
    False
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
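
# An equivalent check by string reversal, shown for comparison with the
# arithmetic version above:
def is_palindrome_str(num: int) -> bool:
    """
    >>> is_palindrome_str(121)
    True
    >>> is_palindrome_str(-121)
    False
    """
    return num >= 0 and str(num) == str(num)[::-1]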
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main() -> None:
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 is enough when every token id fits in two bytes; fall back to int32
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
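# Readback sketch: the dump can be reloaded with pickle, giving a list of NumPy
# integer arrays of token ids (path shown for the default bert settings):
#     with open("data/dump.bert-base-uncased.pickle", "rb") as f:
#         sequences = pickle.load(f)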
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    """
    Return True if every element of `nums` is distinct.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(nums)) == len(nums)
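
# A sort-based alternative for comparison: O(n log n) time, checking sorted
# neighbours instead of hashing every element.
def all_unique_sorted(nums: list[int]) -> bool:
    """
    >>> all_unique_sorted([1, 2, 3])
    True
    >>> all_unique_sorted([1, 2, 2])
    False
    """
    s = sorted(nums)
    return all(a != b for a, b in zip(s, s[1:]))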
if __name__ == "__main__":
import doctest
doctest.testmod()
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, volume: float, moles: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
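
# Worked example of the ideal-gas relation PV = nRT with R = 0.0821 L*atm/(mol*K):
# 3 moles at 300 K under 0.82 atm occupy (3 * 0.0821 * 300) / 0.82 = 90.1... L,
# so moles_to_volume(0.82, 3, 300) returns 90.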
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        # private key, used when no key is passed to the individual methods
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt `content` character by character; returns a list of chars."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """Decrypt `content` character by character; XOR is its own inverse."""
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
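
# Usage sketch (from user code, not inside this module): the defaults describe
# a ResNet-50-style model, and any argument can be overridden, e.g.
#     from transformers import ResNetConfig
#     config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2])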
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        # switch the pivot with the left-most bound
        a[pivot], a[left] = a[left], a[pivot]
        pivot_index = partition(a, left, right)
        # recursive quicksort to the left of the pivot point
        quick_sort_random(a, left, pivot_index)
        # recursive quicksort to the right of the pivot point
        quick_sort_random(a, pivot_index + 1, right)


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
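# Note: the pivot index is drawn uniformly at random from [left, right - 1],
# which makes the quadratic worst case vanishingly unlikely for any fixed input
# order; the expected running time is O(n log n).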
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
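
# The _LazyModule assignment above defers every heavy import until the first
# attribute access. A minimal sketch of the same idea with PEP 562 module
# __getattr__ (illustrative only, not the transformers implementation):
#
#     def __getattr__(name):
#         import importlib
#         for submodule, names in _import_structure.items():
#             if name in names:
#                 return getattr(importlib.import_module(f".{submodule}", __name__), name)
#         raise AttributeError(name)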
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArguments:
    """Configuration for training the model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and model updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1_024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """Configuration for evaluating the model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on the HumanEval benchmark."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File where the evaluation results are saved."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing the data."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1_000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """Configuration for tokenizer training."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200_000, metadata={"help": "Number of examples to train tokenizer on."}
    )
    vocab_size: Optional[int] = field(
        default=32_768, metadata={"help": "Vocab size of the new tokenizer."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """Configuration for data pretokenization."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})


@dataclass
class InitializationArguments:
    """Configuration for initializing a new model."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
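
# Usage sketch: dataclasses like these are typically consumed through
# transformers' HfArgumentParser, roughly:
#     from transformers import HfArgumentParser
#     parser = HfArgumentParser(TrainingArguments)
#     args = parser.parse_args_into_dataclasses()[0]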
def nor_gate(input_1: int, input_2: int) -> int:
    """
    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 0)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """
    Return the maximum sum over all non-empty (not necessarily contiguous)
    subsequences of `nums`.

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # keep the best of: previous best, previous best extended by num, num alone
        ans = max(ans, ans + num, num)
    return ans
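
# For subsequences (elements need not be contiguous) the optimum has a closed
# form: take every positive value, or the largest element when none is positive.
def max_subsequence_sum_closed(nums: Sequence[int]) -> int:
    """
    >>> max_subsequence_sum_closed([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum_closed([-2, -3, -1, -4, -6])
    -1
    """
    return sum(x for x in nums if x > 0) or max(nums)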
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
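
# The assertions above only check divisibility. Assuming the processor rounds
# each spatial dimension down to a multiple of size_divisor (a behaviour the
# tests are consistent with, but do not prove), the output size would be:
def expected_glpn_size(height: int, width: int, size_divisor: int = 32) -> tuple:
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor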
import collections
import inspect
import unittest

from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        FocalNetBackbone,
        FocalNetForImageClassification,
        FocalNetForMaskedImageModeling,
        FocalNetModel,
    )
    from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
UpperCamelCase__ :int = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
UpperCamelCase__ :Dict = outputs.hidden_states
UpperCamelCase__ :Optional[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
# FocalNet has a different seq_length
UpperCamelCase__ :Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase__ :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCamelCase__ :int = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = reshaped_hidden_states[0].shape
UpperCamelCase__ :Any = (
reshaped_hidden_states[0].view(lowerCamelCase__ , lowerCamelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ :Optional[int] = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ :Optional[int] = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :str ):
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Dict = 3
UpperCamelCase__ :Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase__ :List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
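# Pad each spatial dim up to a multiple of the patch size; note that this formula
# adds one full extra patch even when the size is already divisible.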
UpperCamelCase__ :Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase__ :Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__ :Tuple = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ :Any = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
@slow
def __a ( self :Optional[Any] ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Optional[Any] = FocalNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :List[Any] = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[Any] = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Union[str, Any] ):
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Optional[Any] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(lowerCamelCase__ )
UpperCamelCase__ :Tuple = self.default_image_processor
UpperCamelCase__ :int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
UpperCamelCase__ :List[str] = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase__ :Optional[int] = model(**lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCamelCase__ :str = torch.tensor([0.2166, -0.4368, 0.2191] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Union[str, Any] = (FocalNetBackbone,) if is_torch_available() else ()
_snake_case : Optional[Any] = FocalNetConfig
_snake_case : List[str] = False
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[Any] = FocalNetModelTester(self )
| 45 |
import math
def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x) to compare powers without overflow.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any positive power is 0
            return 0
        elif y == 0:
            return 1  # any number raised to the power 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase = "Enter the base and the power separated by a comma: "
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase = res(xa, ya)
UpperCamelCase = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
| 45 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
_snake_case : str = """convnextv2"""
def __init__( self :Dict , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :Dict=4 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :Dict=None , lowerCamelCase__ :Dict=None , lowerCamelCase__ :List[str]="gelu" , lowerCamelCase__ :str=0.02 , lowerCamelCase__ :str=1e-12 , lowerCamelCase__ :int=0.0 , lowerCamelCase__ :str=2_24 , lowerCamelCase__ :Tuple=None , lowerCamelCase__ :Optional[Any]=None , **lowerCamelCase__ :List[Any] , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :List[str] = num_channels
UpperCamelCase__ :Optional[Any] = patch_size
UpperCamelCase__ :Union[str, Any] = num_stages
UpperCamelCase__ :List[str] = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
UpperCamelCase__ :int = [3, 3, 9, 3] if depths is None else depths
UpperCamelCase__ :int = hidden_act
UpperCamelCase__ :Optional[Any] = initializer_range
UpperCamelCase__ :Optional[int] = layer_norm_eps
UpperCamelCase__ :Any = drop_path_rate
UpperCamelCase__ :Dict = image_size
UpperCamelCase__ :Union[str, Any] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ :int = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
| 45 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :int = 13
UpperCamelCase__ :Optional[int] = 7
UpperCamelCase__ :Dict = True
UpperCamelCase__ :Dict = True
UpperCamelCase__ :str = True
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :Any = True
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :List[str] = 99
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Any = 32
UpperCamelCase__ :List[str] = 2
UpperCamelCase__ :int = 4
UpperCamelCase__ :List[str] = 0.1
UpperCamelCase__ :Union[str, Any] = 0.1
UpperCamelCase__ :Union[str, Any] = 5_12
UpperCamelCase__ :List[str] = 16
UpperCamelCase__ :str = 2
UpperCamelCase__ :Optional[int] = 0.02
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :Optional[int] = 4
UpperCamelCase__ :Optional[int] = """last"""
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :int = None
UpperCamelCase__ :Dict = 0
def __a ( self :int ):
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCamelCase__ :Union[str, Any] = None
if self.use_input_lengths:
UpperCamelCase__ :Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ :List[str] = None
if self.use_token_type_ids:
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase__ :int = None
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :List[str] = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ :List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ):
UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask]
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ):
UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ):
UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ )
UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ):
UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ )
UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ):
UpperCamelCase__ :Any = self.num_labels
UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = self.num_choices
UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :int = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self :Tuple ):
UpperCamelCase__ :str = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :str = config_and_inputs
UpperCamelCase__ :Optional[Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_snake_case : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : Tuple = False
def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = TFFlaubertModelTester(self )
UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 )
def __a ( self :int ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :str ):
UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCamelCase__ :Optional[int] = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0]
UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , lowerCamelCase__ )
# compare the actual values for a slice.
UpperCamelCase__ :str = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 45 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : List[Any] = """ClapFeatureExtractor"""
_snake_case : int = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self :Optional[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[Any] ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self :List[Any] , lowerCamelCase__ :List[Any]=None , lowerCamelCase__ :Optional[int]=None , lowerCamelCase__ :Dict=None , **lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :List[str] = kwargs.pop("""sampling_rate""" , lowerCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
UpperCamelCase__ :Optional[Any] = self.tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if audios is not None:
UpperCamelCase__ :str = self.feature_extractor(
lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None and audios is not None:
UpperCamelCase__ :Tuple = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase__ ) , tensor_type=lowerCamelCase__ )
def __a ( self :Any , *lowerCamelCase__ :List[str] , **lowerCamelCase__ :Any ):
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def __a ( self :int , *lowerCamelCase__ :Any , **lowerCamelCase__ :Optional[int] ):
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
def __a ( self :Tuple ):
UpperCamelCase__ :List[str] = self.tokenizer.model_input_names
UpperCamelCase__ :Tuple = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
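# Hedged usage sketch (argument values are illustrative assumptions, not from this file):
# processor = ClapProcessor(feature_extractor, tokenizer)
# inputs = processor(text=["a dog barking"], audios=waveform, sampling_rate=48_000, return_tensors="pt")
# -> a BatchEncoding carrying both the tokenizer's input_ids and the extractor's input_features.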
| 45 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :List[Any] ):
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :Any = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :str = generator.manual_seed(0 )
UpperCamelCase__ :str = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = """cyberpunk 2077"""
UpperCamelCase__ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :str = torch.manual_seed(0 )
UpperCamelCase__ :Dict = pipe.dual_guided(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """
UpperCamelCase__ :List[str] = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.text_to_image(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :Optional[int] = pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images
UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 45 | 1 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
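# Example: 49/98 is digit-cancelling, since dropping the shared 9 leaves 4/8 = 1/2 = 49/98.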
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1  # no-op: the for loop reassigns num on each iteration
        den = 10
    return solutions
def solution(digit_len: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
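# Worked check: the four non-trivial two-digit digit-cancelling fractions are
# 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so solution()
# prints 100 (Project Euler 33).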
| 45 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ):
UpperCamelCase__ :Any = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :Union[str, Any] = patch_size
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :int = use_token_type_ids
UpperCamelCase__ :int = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :List[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :Union[str, Any] = type_sequence_label_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Tuple = shape_size
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :str = num_choices
UpperCamelCase__ :Tuple = scope
UpperCamelCase__ :str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase__ :List[str] = text_seq_length
UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1
UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length
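# e.g. a 224px image with 16px patches gives (224 // 16) ** 2 + 1 = 197 image tokens,
# so 2 text tokens yield a total sequence length of 199, as the integration test below expects.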
def __a ( self :Tuple ):
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase__ :str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase__ :List[str] = bbox[i, j, 3]
UpperCamelCase__ :Optional[int] = bbox[i, j, 1]
UpperCamelCase__ :Optional[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase__ :Tuple = bbox[i, j, 2]
UpperCamelCase__ :Optional[Any] = bbox[i, j, 0]
UpperCamelCase__ :List[str] = tmp_coordinate
UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Any = None
if self.use_input_mask:
UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase__ :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase__ :Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ):
UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ )
# text + image
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , )
UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ):
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ )
UpperCamelCase__ :List[str] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = self.num_labels
UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Dict = 2
UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
UpperCamelCase__ :int = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs
UpperCamelCase__ :List[str] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : List[str] = False
_snake_case : Tuple = False
def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ):
return True
def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ):
UpperCamelCase__ :List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class in get_values(lowerCamelCase__ ):
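# For multiple-choice models, tile every non-scalar tensor input num_choices times
# along a new second axis so each choice sees the same pixel/bbox inputs.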
UpperCamelCase__ :Optional[int] = {
k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Tuple = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Any ):
self.config_tester.run_common_tests()
def __a ( self :Optional[int] ):
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ )
if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0]
]
UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase__ :Optional[Any] = -1_00
UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters
UpperCamelCase__ :str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase__ :Any = {0: """input_ids"""}
for label_key in label_keys:
UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = label_key
UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase__ :Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase__ :List[str] = prepared_for_class[value]
UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ )
# Send to model
UpperCamelCase__ :str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __a ( self :Optional[int] ):
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ :Dict = type
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Tuple ):
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :Optional[int] ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A ( ) -> List[str]:
UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Optional[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def __a ( self :Dict ):
UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
UpperCamelCase__ :List[Any] = self.default_image_processor
UpperCamelCase__ :str = prepare_img()
UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values
UpperCamelCase__ :str = tf.constant([[1, 2]] )
UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :int = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 45 | 1 |
def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
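# Example: selection_sort([64, 25, 12, 22, 11]) -> [11, 12, 22, 25, 64]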
| 45 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The column name of the images in the files."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} )
_snake_case : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __a ( self :List[str] ):
UpperCamelCase__ :Optional[Any] = {}
if self.train_dir is not None:
UpperCamelCase__ :int = self.train_dir
if self.validation_dir is not None:
UpperCamelCase__ :List[str] = self.validation_dir
UpperCamelCase__ :Optional[int] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = field(
default=lowercase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} )
_snake_case : bool = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_snake_case : float = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : float = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def A ( lowercase__ : Union[str, Any] ) -> Dict:
UpperCamelCase__ :Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def A ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ :List[str] = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCamelCase__ :Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ :List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCamelCase__ :Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCamelCase__ :Union[str, Any] = split["""train"""]
UpperCamelCase__ :Any = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :Optional[int] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Optional[Any] = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Tuple = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ )
if training_args.do_train:
UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names
else:
UpperCamelCase__ :Union[str, Any] = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCamelCase__ :Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase__ :Optional[Any] = """image"""
elif "img" in column_names:
UpperCamelCase__ :List[str] = """img"""
else:
UpperCamelCase__ :List[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase__ :Any = Compose(
[
Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
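# Illustrative note: the pipeline above mirrors the MAE paper's light augmentation --
# force 3-channel RGB, take a random crop covering 20-100% of the image resized to the
# processor's target size, flip horizontally at random, then normalize with the
# processor's channel statistics. Heavier augmentation is avoided in MAE since the
# random masking itself already provides strong regularization.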
def preprocess_images(lowercase__ : Tuple ):
examples["pixel_values"] = [transforms(image ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCamelCase__ :Optional[Any] = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Compute absolute learning rate
UpperCamelCase__ :Tuple = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256
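# Worked example (illustrative): with the default base_learning_rate of 1e-3, a
# per-device batch of 32, gradient accumulation of 2 and 4 processes, the total batch
# is 32 * 2 * 4 = 256, so the absolute rate is 1e-3 * 256 / 256 = 1e-3. Doubling the
# total batch to 512 would scale it linearly to 2e-3, per the field's help text above.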
# Initialize our trainer
UpperCamelCase__ :Union[str, Any] = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
UpperCamelCase__ :Any = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ :int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ :Dict = last_checkpoint
UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ :int = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase__ )
trainer.save_metrics("""eval""" , lowercase__ )
# Write model card and (optionally) push to hub
UpperCamelCase__ :Optional[int] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def A ( lowercase__ : Union[str, Any] ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 45 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def A ( lowercase__ : list , lowercase__ : list , lowercase__ : list , lowercase__ : list , lowercase__ : list ) -> float:
UpperCamelCase__ :int = np.array([[1, item, train_mtch[i]] for i, item in enumerate(lowercase__ )] )
UpperCamelCase__ :List[Any] = np.array(lowercase__ )
UpperCamelCase__ :Dict = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , lowercase__ ) ) , x.transpose() ) , lowercase__ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
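# Illustrative note: the beta computed above is the closed-form ordinary-least-squares
# solution beta = (X^T X)^(-1) X^T y for a design matrix X with columns [1, date, match],
# so beta[0] is the intercept and beta[1], beta[2] are the per-feature slopes used in
# the prediction returned above.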
def A ( lowercase__ : list , lowercase__ : list , lowercase__ : list ) -> float:
UpperCamelCase__ :Optional[int] = (1, 2, 1)
UpperCamelCase__ :Optional[int] = (1, 1, 0, 7)
UpperCamelCase__ :Optional[int] = SARIMAX(
lowercase__ , exog=lowercase__ , order=lowercase__ , seasonal_order=lowercase__ )
UpperCamelCase__ :Tuple = model.fit(disp=lowercase__ , maxiter=600 , method="""nm""" )
UpperCamelCase__ :int = model_fit.predict(1 , len(lowercase__ ) , exog=[test_match] )
return result[0]
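# Illustrative note: order=(1, 2, 1) is the non-seasonal (p, d, q) ARIMA specification --
# one autoregressive lag, double differencing, one moving-average lag -- while
# seasonal_order=(1, 1, 0, 7) adds a weekly (period-7) seasonal AR and differencing term,
# with the match counts passed in as an exogenous regressor.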
def A ( lowercase__ : list , lowercase__ : list , lowercase__ : list ) -> float:
UpperCamelCase__ :Optional[Any] = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(lowercase__ , lowercase__ )
UpperCamelCase__ :Optional[int] = regressor.predict(lowercase__ )
return y_pred[0]
def A ( lowercase__ : list ) -> float:
train_user.sort()
UpperCamelCase__ :List[Any] = np.percentile(lowercase__ , 25 )
UpperCamelCase__ :Optional[Any] = np.percentile(lowercase__ , 75 )
UpperCamelCase__ :Optional[int] = q3 - q1
UpperCamelCase__ :Tuple = q1 - (iqr * 0.1)
return low_lim
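# Illustrative note: this is a one-sided interquartile-range check. With sorted user
# counts whose 25th/75th percentiles are q1 and q3, anything below q1 - 0.1 * (q3 - q1)
# is treated as suspiciously low; 0.1 is a much stricter multiplier than the conventional
# 1.5 used for classic outlier fences.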
def A ( lowercase__ : list , lowercase__ : float ) -> bool:
UpperCamelCase__ :List[Any] = 0
UpperCamelCase__ :str = 0
for i in list_vote:
if i > actual_result:
UpperCamelCase__ :List[str] = not_safe + 1
else:
if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
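# Worked example (illustrative, assuming the intended += updates): with forecasts
# [4.95, 5.0, 9.0] and an actual value of 5.0, the first two land within the 0.1
# tolerance (safe = 2) while 9.0 overshoots (not_safe = 1), so the checker returns
# True and the day's data is declared safe by majority vote.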
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
UpperCamelCase = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
UpperCamelCase = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
UpperCamelCase = Normalizer().fit_transform(data_input_df.values)
# split data
UpperCamelCase = normalize_df[:, 2].tolist()
UpperCamelCase = normalize_df[:, 0].tolist()
UpperCamelCase = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
UpperCamelCase = normalize_df[:, [1, 2]].tolist()
UpperCamelCase = x[: len(x) - 1]
UpperCamelCase = x[len(x) - 1 :]
# for linear regression & sarimax
UpperCamelCase = total_date[: len(total_date) - 1]
UpperCamelCase = total_user[: len(total_user) - 1]
UpperCamelCase = total_match[: len(total_match) - 1]
UpperCamelCase = total_date[len(total_date) - 1 :]
UpperCamelCase = total_user[len(total_user) - 1 :]
UpperCamelCase = total_match[len(total_match) - 1 :]
# voting system with forecasting
UpperCamelCase = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
UpperCamelCase = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
| 45 |
from __future__ import annotations
def A ( lowercase__ : int ) -> list[int]:
UpperCamelCase__ :Union[str, Any] = [True] * limit
UpperCamelCase__ :int = False
UpperCamelCase__ :Optional[Any] = False
UpperCamelCase__ :str = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
UpperCamelCase__ :List[Any] = i * 2
while index < limit:
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Tuple = index + i
UpperCamelCase__ :str = [2]
for i in range(3 , lowercase__ , 2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
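# Illustrative note: the sieve marks 0 and 1 composite, strikes multiples of each odd i
# up to sqrt(limit) starting from 2 * i, and then collects 2 plus every odd index still
# flagged prime -- even numbers above 2 are skipped rather than marked. For example,
# prime_sieve(10) yields [2, 3, 5, 7].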
def A ( lowercase__ : int = 100_0000 ) -> int:
UpperCamelCase__ :Any = prime_sieve(lowercase__ )
UpperCamelCase__ :Optional[int] = 0
UpperCamelCase__ :Optional[Any] = 0
for i in range(len(lowercase__ ) ):
for j in range(i + length , len(lowercase__ ) ):
UpperCamelCase__ :Any = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
UpperCamelCase__ :Union[str, Any] = j - i
UpperCamelCase__ :Any = sol
return largest
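# Illustrative note: this is Project Euler problem 50 -- find the prime below the
# ceiling expressible as the longest sum of consecutive primes. Starting the inner loop
# at i + length skips any window that cannot beat the best run found so far, and the
# break fires as soon as a window sum reaches the ceiling. For a ceiling of one million
# the expected answer is 997651, a run of 543 consecutive primes.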
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : int = (PNDMScheduler,)
_snake_case : Any = (("""num_inference_steps""", 50),)
def __a ( self :str , **lowerCamelCase__ :int ):
UpperCamelCase__ :Any = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**lowerCamelCase__ )
return config
def __a ( self :Dict , lowerCamelCase__ :List[Any]=0 , **lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Optional[int] = dict(self.forward_default_kwargs )
UpperCamelCase__ :int = kwargs.pop("""num_inference_steps""" , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = self.dummy_sample
UpperCamelCase__ :Tuple = 0.1 * sample
UpperCamelCase__ :List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ :str = self.get_scheduler_config(**lowerCamelCase__ )
UpperCamelCase__ :Tuple = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCamelCase__ :str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCamelCase__ :int = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
UpperCamelCase__ :List[str] = dummy_past_residuals[:]
UpperCamelCase__ :Dict = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
UpperCamelCase__ :List[str] = new_scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase__ :Optional[Any] = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
UpperCamelCase__ :Optional[int] = new_scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __a ( self :List[Any] ):
pass
def __a ( self :Optional[int] , lowerCamelCase__ :Tuple=0 , **lowerCamelCase__ :Tuple ):
UpperCamelCase__ :List[Any] = dict(self.forward_default_kwargs )
UpperCamelCase__ :List[str] = kwargs.pop("""num_inference_steps""" , lowerCamelCase__ )
UpperCamelCase__ :List[str] = self.dummy_sample
UpperCamelCase__ :Union[str, Any] = 0.1 * sample
UpperCamelCase__ :Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ :Optional[Any] = self.get_scheduler_config()
UpperCamelCase__ :Optional[Any] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase__ :int = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase__ :Optional[int] = dummy_past_residuals[:]
UpperCamelCase__ :Any = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
UpperCamelCase__ :Tuple = new_scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase__ :Any = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
UpperCamelCase__ :Union[str, Any] = new_scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __a ( self :Dict , **lowerCamelCase__ :Union[str, Any] ):
UpperCamelCase__ :Optional[Any] = self.scheduler_classes[0]
UpperCamelCase__ :int = self.get_scheduler_config(**lowerCamelCase__ )
UpperCamelCase__ :str = scheduler_class(**lowerCamelCase__ )
UpperCamelCase__ :int = 10
UpperCamelCase__ :Any = self.dummy_model()
UpperCamelCase__ :int = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCamelCase__ :str = model(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Tuple = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCamelCase__ :Dict = model(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[str] = scheduler.step_plms(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
return sample
def __a ( self :List[Any] ):
UpperCamelCase__ :List[str] = dict(self.forward_default_kwargs )
UpperCamelCase__ :int = kwargs.pop("""num_inference_steps""" , lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ :Union[str, Any] = self.get_scheduler_config()
UpperCamelCase__ :int = scheduler_class(**lowerCamelCase__ )
UpperCamelCase__ :Dict = self.dummy_sample
UpperCamelCase__ :Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ , """set_timesteps""" ):
UpperCamelCase__ :Dict = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase__ :Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCamelCase__ :Tuple = dummy_past_residuals[:]
UpperCamelCase__ :int = scheduler.step_prk(lowerCamelCase__ , 0 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
UpperCamelCase__ :Dict = scheduler.step_prk(lowerCamelCase__ , 1 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
self.assertEqual(output_0.shape , sample.shape )
self.assertEqual(output_0.shape , output_1.shape )
UpperCamelCase__ :int = scheduler.step_plms(lowerCamelCase__ , 0 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
UpperCamelCase__ :Union[str, Any] = scheduler.step_plms(lowerCamelCase__ , 1 , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
self.assertEqual(output_0.shape , sample.shape )
self.assertEqual(output_0.shape , output_1.shape )
def __a ( self :Any ):
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def __a ( self :str ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCamelCase__ )
UpperCamelCase__ :Tuple = self.scheduler_classes[0]
UpperCamelCase__ :Tuple = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase__ :Optional[int] = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
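# Illustrative note: with steps_offset=1 and 10 inference steps, the duplicated entries
# (851, 801, ..., 601) come from the Runge-Kutta warm-up phase, where each PRK step
# revisits intermediate timesteps before the scheduler switches to the cheaper
# linear-multistep (PLMS) updates for the remaining 501 ... 1 values.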
def __a ( self :List[str] ):
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase__ , beta_end=lowerCamelCase__ )
def __a ( self :Tuple ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase__ )
def __a ( self :List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def __a ( self :Any ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowerCamelCase__ )
def __a ( self :List[Any] ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=lowerCamelCase__ )
def __a ( self :Dict ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCamelCase__ :int = 27
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ :Tuple = self.dummy_sample
UpperCamelCase__ :List[str] = 0.1 * sample
UpperCamelCase__ :List[Any] = self.get_scheduler_config()
UpperCamelCase__ :Any = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCamelCase__ :Union[str, Any] = scheduler.step_prk(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
def __a ( self :str ):
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = self.scheduler_classes[0]
UpperCamelCase__ :Union[str, Any] = self.get_scheduler_config()
UpperCamelCase__ :str = scheduler_class(**lowerCamelCase__ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __a ( self :Optional[int] ):
UpperCamelCase__ :Tuple = self.full_loop()
UpperCamelCase__ :str = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCamelCase__ :Tuple = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def __a ( self :Dict ):
UpperCamelCase__ :str = self.full_loop(prediction_type="""v_prediction""" )
UpperCamelCase__ :Tuple = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCamelCase__ :str = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def __a ( self :Union[str, Any] ):
# We specify different beta, so that the first alpha is 0.99
UpperCamelCase__ :Dict = self.full_loop(set_alpha_to_one=lowerCamelCase__ , beta_start=0.01 )
UpperCamelCase__ :Union[str, Any] = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCamelCase__ :Tuple = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def __a ( self :Dict ):
# We specify different beta, so that the first alpha is 0.99
UpperCamelCase__ :Union[str, Any] = self.full_loop(set_alpha_to_one=lowerCamelCase__ , beta_start=0.01 )
UpperCamelCase__ :List[Any] = torch.sum(torch.abs(lowerCamelCase__ ) )
UpperCamelCase__ :Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3
| 45 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ):
UpperCamelCase__ :Optional[Any] = parent
UpperCamelCase__ :Dict = batch_size
UpperCamelCase__ :Tuple = seq_length
UpperCamelCase__ :Dict = is_training
UpperCamelCase__ :List[str] = use_input_mask
UpperCamelCase__ :Optional[Any] = use_token_type_ids
UpperCamelCase__ :Tuple = use_labels
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :Optional[Any] = num_hidden_layers
UpperCamelCase__ :int = num_attention_heads
UpperCamelCase__ :Optional[int] = intermediate_multiple_size
UpperCamelCase__ :Optional[Any] = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout
UpperCamelCase__ :List[Any] = attention_dropout
UpperCamelCase__ :List[str] = weight_tying
UpperCamelCase__ :List[str] = max_position_embeddings
UpperCamelCase__ :Dict = type_vocab_size
UpperCamelCase__ :List[Any] = type_sequence_label_size
UpperCamelCase__ :List[str] = initializer_range
UpperCamelCase__ :int = num_labels
UpperCamelCase__ :Dict = num_choices
UpperCamelCase__ :Any = scope
def __a ( self :Any ):
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :str = None
if self.use_input_mask:
UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __a ( self :Union[str, Any] ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs()
UpperCamelCase__ :Optional[int] = True
return config, input_ids, input_mask, token_labels
def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ):
UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# first forward pass
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ :Union[str, Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def __a ( self :Tuple ):
UpperCamelCase__ :int = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_snake_case : str = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_snake_case : Union[str, Any] = False
_snake_case : Dict = False
_snake_case : List[str] = False
_snake_case : Optional[int] = False
def __a ( self :List[Any] ):
UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self )
UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Dict ):
self.config_tester.run_common_tests()
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ :Dict = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def __a ( self :int ):
UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b"""
UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
UpperCamelCase__ :Union[str, Any] = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = []
for prompt in prompts:
UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids
UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 )
UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
| 45 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : List[str] = """data2vec-vision"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[Any]=7_68 , lowerCamelCase__ :Tuple=12 , lowerCamelCase__ :str=12 , lowerCamelCase__ :Optional[int]=30_72 , lowerCamelCase__ :Union[str, Any]="gelu" , lowerCamelCase__ :Any=0.0 , lowerCamelCase__ :Dict=0.0 , lowerCamelCase__ :Dict=0.02 , lowerCamelCase__ :Tuple=1e-12 , lowerCamelCase__ :Union[str, Any]=2_24 , lowerCamelCase__ :Union[str, Any]=16 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Tuple=False , lowerCamelCase__ :Tuple=False , lowerCamelCase__ :Union[str, Any]=False , lowerCamelCase__ :Optional[Any]=False , lowerCamelCase__ :Tuple=0.1 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :Dict=True , lowerCamelCase__ :Optional[Any]=[3, 5, 7, 11] , lowerCamelCase__ :int=[1, 2, 3, 6] , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Any=0.4 , lowerCamelCase__ :List[Any]=2_56 , lowerCamelCase__ :str=1 , lowerCamelCase__ :Optional[int]=False , lowerCamelCase__ :str=2_55 , **lowerCamelCase__ :Tuple , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = hidden_size
UpperCamelCase__ :Any = num_hidden_layers
UpperCamelCase__ :Dict = num_attention_heads
UpperCamelCase__ :Union[str, Any] = intermediate_size
UpperCamelCase__ :Optional[int] = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Optional[Any] = attention_probs_dropout_prob
UpperCamelCase__ :Optional[int] = initializer_range
UpperCamelCase__ :Tuple = layer_norm_eps
UpperCamelCase__ :int = image_size
UpperCamelCase__ :List[Any] = patch_size
UpperCamelCase__ :Optional[Any] = num_channels
UpperCamelCase__ :List[Any] = use_mask_token
UpperCamelCase__ :List[Any] = use_absolute_position_embeddings
UpperCamelCase__ :Any = use_relative_position_bias
UpperCamelCase__ :Union[str, Any] = use_shared_relative_position_bias
UpperCamelCase__ :List[Any] = layer_scale_init_value
UpperCamelCase__ :List[Any] = drop_path_rate
UpperCamelCase__ :List[Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
UpperCamelCase__ :Tuple = out_indices
UpperCamelCase__ :Any = pool_scales
# auxiliary head attributes (semantic segmentation)
UpperCamelCase__ :int = use_auxiliary_head
UpperCamelCase__ :List[str] = auxiliary_loss_weight
UpperCamelCase__ :Optional[Any] = auxiliary_channels
UpperCamelCase__ :Tuple = auxiliary_num_convs
UpperCamelCase__ :List[str] = auxiliary_concat_input
UpperCamelCase__ :Optional[Any] = semantic_loss_ignore_index
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : int = version.parse("""1.11""" )
@property
def __a ( self :List[str] ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __a ( self :Union[str, Any] ):
return 1e-4
| 45 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def A ( lowercase__ : dict ) -> tuple:
return (data["data"], data["target"])
def A ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> XGBClassifier:
UpperCamelCase__ :Tuple = XGBClassifier()
classifier.fit(lowercase__ , lowercase__ )
return classifier
def A ( ) -> None:
UpperCamelCase__ :str = load_iris()
UpperCamelCase__ , UpperCamelCase__ :int = data_handling(lowercase__ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = train_test_split(
lowercase__ , lowercase__ , test_size=0.25 )
UpperCamelCase__ :Optional[int] = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
UpperCamelCase__ :Optional[Any] = xgboost(lowercase__ , lowercase__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowercase__ , lowercase__ , lowercase__ , display_labels=lowercase__ , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 45 | 1 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece.model")
UpperCamelCase = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
UpperCamelCase = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = CamembertTokenizer
_snake_case : Optional[int] = CamembertTokenizerFast
_snake_case : Tuple = True
_snake_case : Dict = True
def __a ( self :Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase__ :Union[str, Any] = CamembertTokenizer(lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = """<pad>"""
UpperCamelCase__ :Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(lowerCamelCase__ ) , 10_04 )
def __a ( self :Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def __a ( self :Dict ):
UpperCamelCase__ :List[str] = CamembertTokenizer(lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
UpperCamelCase__ :Union[str, Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
UpperCamelCase__ :Any = """I was born in 92000, and this is falsé."""
UpperCamelCase__ :List[Any] = tokenizer.encode(lowerCamelCase__ )
UpperCamelCase__ :Dict = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
UpperCamelCase__ :Any = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
UpperCamelCase__ :str = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
if not self.test_rust_tokenizer:
return
UpperCamelCase__ :Dict = self.get_tokenizer()
UpperCamelCase__ :str = self.get_rust_tokenizer()
UpperCamelCase__ :str = """I was born in 92000, and this is falsé."""
UpperCamelCase__ :Optional[Any] = tokenizer.tokenize(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Tuple = self.get_rust_tokenizer()
UpperCamelCase__ :Union[str, Any] = tokenizer.encode(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :List[str] ):
# fmt: off
UpperCamelCase__ :Any = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
UpperCamelCase__ :Tuple = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=lowerCamelCase__ , )
| 45 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A ( lowercase__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase__ :Union[str, Any] = {}
UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""]
UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
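# Illustrative note: the ratio computed above is characters per token, a rough measure
# of how efficiently the tokenizer compresses source code -- e.g. a 100-character
# example that tokenizes into 25 ids gives a ratio of 4.0, and lower values mean
# heavier fragmentation.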
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 45 | 1 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Tuple = """conditional_detr"""
_snake_case : List[Any] = ["""past_key_values"""]
_snake_case : str = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self :Dict , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Any=None , lowerCamelCase__ :Dict=3 , lowerCamelCase__ :Dict=3_00 , lowerCamelCase__ :Union[str, Any]=6 , lowerCamelCase__ :List[Any]=20_48 , lowerCamelCase__ :List[str]=8 , lowerCamelCase__ :Any=6 , lowerCamelCase__ :Optional[int]=20_48 , lowerCamelCase__ :Tuple=8 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :List[str]="relu" , lowerCamelCase__ :List[Any]=2_56 , lowerCamelCase__ :Optional[Any]=0.1 , lowerCamelCase__ :Dict=0.0 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Tuple=0.02 , lowerCamelCase__ :Any=1.0 , lowerCamelCase__ :Dict=False , lowerCamelCase__ :str="sine" , lowerCamelCase__ :Dict="resnet50" , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[int]=False , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[str]=5 , lowerCamelCase__ :List[Any]=2 , lowerCamelCase__ :str=1 , lowerCamelCase__ :Any=1 , lowerCamelCase__ :Optional[int]=2 , lowerCamelCase__ :Union[str, Any]=5 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.25 , **lowerCamelCase__ :Dict , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
UpperCamelCase__ :Tuple = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
UpperCamelCase__ :int = backbone_config.get("""model_type""" )
UpperCamelCase__ :Optional[int] = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase__ :List[Any] = config_class.from_dict(lowerCamelCase__ )
UpperCamelCase__ :str = use_timm_backbone
UpperCamelCase__ :Dict = backbone_config
UpperCamelCase__ :Union[str, Any] = num_channels
UpperCamelCase__ :Optional[Any] = num_queries
UpperCamelCase__ :Any = d_model
UpperCamelCase__ :str = encoder_ffn_dim
UpperCamelCase__ :Union[str, Any] = encoder_layers
UpperCamelCase__ :Tuple = encoder_attention_heads
UpperCamelCase__ :Optional[Any] = decoder_ffn_dim
UpperCamelCase__ :Optional[int] = decoder_layers
UpperCamelCase__ :Optional[Any] = decoder_attention_heads
UpperCamelCase__ :Any = dropout
UpperCamelCase__ :Union[str, Any] = attention_dropout
UpperCamelCase__ :List[Any] = activation_dropout
UpperCamelCase__ :int = activation_function
UpperCamelCase__ :Tuple = init_std
UpperCamelCase__ :Any = init_xavier_std
UpperCamelCase__ :Any = encoder_layerdrop
UpperCamelCase__ :Optional[int] = decoder_layerdrop
UpperCamelCase__ :Optional[int] = encoder_layers
UpperCamelCase__ :Optional[Any] = auxiliary_loss
UpperCamelCase__ :List[Any] = position_embedding_type
UpperCamelCase__ :Any = backbone
UpperCamelCase__ :Dict = use_pretrained_backbone
UpperCamelCase__ :Tuple = dilation
# Hungarian matcher
UpperCamelCase__ :Optional[Any] = class_cost
UpperCamelCase__ :str = bbox_cost
UpperCamelCase__ :int = giou_cost
# Loss coefficients
UpperCamelCase__ :Dict = mask_loss_coefficient
UpperCamelCase__ :str = dice_loss_coefficient
UpperCamelCase__ :Union[str, Any] = cls_loss_coefficient
UpperCamelCase__ :List[Any] = bbox_loss_coefficient
UpperCamelCase__ :List[Any] = giou_loss_coefficient
UpperCamelCase__ :Dict = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase__ , **lowerCamelCase__ )
@property
def __a ( self :Union[str, Any] ):
return self.encoder_attention_heads
@property
def __a ( self :int ):
return self.d_model
def __a ( self :Any ):
UpperCamelCase__ :str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
UpperCamelCase__ :str = self.backbone_config.to_dict()
UpperCamelCase__ :Dict = self.__class__.model_type
return output
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Union[str, Any] = version.parse("""1.11""" )
@property
def __a ( self :int ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __a ( self :List[Any] ):
return 1e-5
@property
def __a ( self :Optional[int] ):
return 12
| 45 |
def A ( lowercase__ : int ) -> Optional[Any]:
stooge(lowercase__ , 0 , len(lowercase__ ) - 1 )
return arr
def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : str ) -> List[str]:
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
UpperCamelCase__ , UpperCamelCase__ :List[str] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
UpperCamelCase__ :Optional[int] = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowercase__ , i + t , (lowercase__) )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
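# Illustrative note: stooge sort recursively sorts the first two thirds, the last two
# thirds, and the first two thirds again; solving T(n) = 3 * T(2n / 3) + O(1) gives a
# running time of O(n^(log 3 / log 1.5)), roughly O(n^2.71) -- slower than bubble sort,
# so the algorithm is of purely educational interest.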
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 45 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCamelCase = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]:
UpperCamelCase__ :str = SavedModel()
UpperCamelCase__ :List[str] = []
with open(os.path.join(lowercase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
UpperCamelCase__ :str = json.load(lowercase__ )["""opsets"""]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(lowercase__ )] )
with open(lowercase__ , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
UpperCamelCase__ :Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
UpperCamelCase__ :Union[str, Any] = sorted(lowercase__ )
UpperCamelCase__ :List[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowercase__ )
if strict and len(lowercase__ ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(lowercase__ ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*lowercase__ , sep="""\n""" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
UpperCamelCase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 45 | 1 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> int:
if not nums:
return 0
UpperCamelCase__ :Tuple = nums[0]
UpperCamelCase__ :int = 0
for num in nums[1:]:
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = (
max_excluding + num,
max(max_including , max_excluding ),
)
return max(max_including , max_excluding )
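# Worked trace (illustrative): for nums = [2, 7, 9, 3, 1] the (including, excluding)
# pair evolves (2, 0) -> (7, 2) -> (11, 7) -> (10, 11) -> (12, 11), so the function
# returns 12, i.e. the non-adjacent picks 2 + 9 + 1.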
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
from __future__ import annotations
def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]:
UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
UpperCamelCase__ :Optional[Any] = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
UpperCamelCase__ :Optional[int] = frequencies_dict
if not case_sensitive:
UpperCamelCase__ :int = ciphertext.lower()
# Chi squared statistic values
UpperCamelCase__ :dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(lowercase__ ) ):
UpperCamelCase__ :int = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
UpperCamelCase__ :Optional[int] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
UpperCamelCase__ :Optional[int] = letter.lower()
if letter in frequencies:
# Get the number of times the letter occurs in the message
UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ )
# Get the expected number of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected
# Add the letter's term to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the number of times the letter occurs in the message
UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ )
# Get the expected number of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected
# Add the letter's term to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
UpperCamelCase__ :Union[str, Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
UpperCamelCase__ :int = min(
lowercase__ , key=lowercase__ , )
# Get all the data from the most likely cipher (key, decoded message)
UpperCamelCase__ , UpperCamelCase__ :Tuple = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
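# A self-contained sketch of the per-letter scoring used above (hypothetical name):
# each letter contributes ((observed - expected) ** 2) / expected to the shift's total.
def chi_squared_term_sketch(observed: int, expected: float) -> float:
    return (observed - expected) ** 2 / expected
# e.g. 4 observed "e"s against an expected count of 5.581 contribute
# chi_squared_term_sketch(4, 5.581) ~= 0.448; the shift with the smallest total wins.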
| 45 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def A ( lowercase__ : dict ) -> tuple:
return (data["data"], data["target"])
def A ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> XGBClassifier:
UpperCamelCase__ :Tuple = XGBClassifier()
classifier.fit(lowercase__ , lowercase__ )
return classifier
def A ( ) -> None:
UpperCamelCase__ :str = load_iris()
UpperCamelCase__ , UpperCamelCase__ :int = data_handling(lowercase__ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = train_test_split(
lowercase__ , lowercase__ , test_size=0.25 )
UpperCamelCase__ :Optional[int] = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
UpperCamelCase__ :Optional[Any] = xgboost(lowercase__ , lowercase__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowercase__ , lowercase__ , lowercase__ , display_labels=lowercase__ , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
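# For reference, load_iris() yields 150 samples with 4 features and 3 classes, so
# data_handling returns arrays of shape (150, 4) and (150,); the 0.25 test split
# leaves 112 rows for fitting the XGBClassifier and 38 for the confusion matrix.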
| 45 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ):
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 45 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] ):
super().__init__()
self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
@torch.no_grad()
def __call__( self :Optional[int] , lowerCamelCase__ :int = 1 , lowerCamelCase__ :int = 1_00 , lowerCamelCase__ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ :Optional[float] = None , lowerCamelCase__ :bool = True , ):
if audio_length_in_s is None:
UpperCamelCase__ :List[Any] = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCamelCase__ :Tuple = audio_length_in_s * self.unet.config.sample_rate
UpperCamelCase__ :str = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
UpperCamelCase__ :Tuple = int(lowerCamelCase__ )
if sample_size % down_scale_factor != 0:
UpperCamelCase__ :Union[str, Any] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
""" process.""" )
UpperCamelCase__ :Dict = int(lowerCamelCase__ )
UpperCamelCase__ :Any = next(iter(self.unet.parameters() ) ).dtype
UpperCamelCase__ :Union[str, Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCamelCase__ :Union[str, Any] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=self.device , dtype=lowerCamelCase__ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase__ , device=audio.device )
UpperCamelCase__ :List[Any] = self.scheduler.timesteps.to(lowerCamelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCamelCase__ :Tuple = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCamelCase__ :str = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
UpperCamelCase__ :Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCamelCase__ :Optional[int] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase__ )
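# A minimal usage sketch (hypothetical checkpoint id and alias; the class above
# registers a UNet and a scheduler, mirroring diffusers' unconditional audio pipelines):
#   pipe = AudioPipeline.from_pretrained("org/audio-diffusion-checkpoint")  # hypothetical names
#   audio = pipe(batch_size=1, num_inference_steps=100).audios[0]
# The result is a float waveform clamped to [-1, 1] and trimmed to the requested length.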
| 45 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase = get_tests_dir("fixtures")
UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCamelCase = get_tests_dir("fixtures/dummy-config.json")
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[int] = 0
def __a ( self :str ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ :List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase__ :Tuple = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def __a ( self :List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def __a ( self :int ):
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __a ( self :Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def __a ( self :Dict ):
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self :Optional[int] ):
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[int] = True
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 45 | 1 |
from __future__ import annotations
import bisect
def A ( lowercase__ : list[int] , lowercase__ : int , lowercase__ : int = 0 , lowercase__ : int = -1 ) -> int:
if hi < 0:
UpperCamelCase__ :List[Any] = len(lowercase__ )
while lo < hi:
UpperCamelCase__ :List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
UpperCamelCase__ :Any = mid + 1
else:
UpperCamelCase__ :Union[str, Any] = mid
return lo
def A ( lowercase__ : list[int] , lowercase__ : int , lowercase__ : int = 0 , lowercase__ : int = -1 ) -> int:
if hi < 0:
UpperCamelCase__ :Union[str, Any] = len(lowercase__ )
while lo < hi:
UpperCamelCase__ :Optional[Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
UpperCamelCase__ :Dict = mid + 1
else:
UpperCamelCase__ :Dict = mid
return lo
def A ( lowercase__ : list[int] , lowercase__ : int , lowercase__ : int = 0 , lowercase__ : int = -1 ) -> None:
sorted_collection.insert(bisect_left(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) , lowercase__ )
def A ( lowercase__ : list[int] , lowercase__ : int , lowercase__ : int = 0 , lowercase__ : int = -1 ) -> None:
sorted_collection.insert(bisect_right(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) , lowercase__ )
def A ( lowercase__ : list[int] , lowercase__ : int ) -> int | None:
UpperCamelCase__ :str = 0
UpperCamelCase__ :int = len(lowercase__ ) - 1
while left <= right:
UpperCamelCase__ :Optional[Any] = left + (right - left) // 2
UpperCamelCase__ :Tuple = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
UpperCamelCase__ :str = midpoint - 1
else:
UpperCamelCase__ :Optional[Any] = midpoint + 1
return None
def A ( lowercase__ : list[int] , lowercase__ : int ) -> int | None:
UpperCamelCase__ :int = bisect.bisect_left(lowercase__ , lowercase__ )
if index != len(lowercase__ ) and sorted_collection[index] == item:
return index
return None
def A ( lowercase__ : list[int] , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int | None:
if right < left:
return None
UpperCamelCase__ :List[str] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowercase__ , lowercase__ , lowercase__ , midpoint - 1 )
else:
return binary_search_by_recursion(lowercase__ , lowercase__ , midpoint + 1 , lowercase__ )
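# Worked examples of the helpers above on [0, 5, 7, 10, 15]:
#   bisect_left(..., 6) -> 2 (insertion point between 5 and 7)
#   bisect_right(..., 7) -> 3 (insertion point after the existing 7)
#   binary_search(..., 15) -> 4; binary_search(..., 6) -> None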
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by comma:\n").strip()
UpperCamelCase = sorted(int(item) for item in user_input.split(","))
UpperCamelCase = int(input("Enter a single number to be found in the list:\n"))
UpperCamelCase = binary_search(collection, target)
if result is None:
print(f'''{target} was not found in {collection}.''')
else:
print(f'''{target} was found at position {result} in {collection}.''')
| 45 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ):
super().__init__()
UpperCamelCase__ :Tuple = value_function
UpperCamelCase__ :Optional[int] = unet
UpperCamelCase__ :List[str] = scheduler
UpperCamelCase__ :Dict = env
UpperCamelCase__ :Dict = env.get_dataset()
UpperCamelCase__ :Union[str, Any] = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase__ :Any = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase__ :List[Any] = env.observation_space.shape[0]
UpperCamelCase__ :List[str] = env.action_space.shape[0]
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ):
return (x_in - self.means[key]) / self.stds[key]
def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
return x_in * self.stds[key] + self.means[key]
def __a ( self :Any , lowerCamelCase__ :int ):
if type(lowerCamelCase__ ) is dict:
return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
for key, val in cond.items():
UpperCamelCase__ :str = val.clone()
return x_in
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Any = x.shape[0]
UpperCamelCase__ :List[Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ )
UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance )
UpperCamelCase__ :Dict = model_std * grad
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Dict = x.detach()
UpperCamelCase__ :int = x + scale * grad
UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ )
return x, y
def __call__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ):
# normalize the observations and create batch dimension
UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" )
UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 )
UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )}
UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device )
UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
UpperCamelCase__ :Dict = x[sorted_idx]
UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim]
UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" )
# select the action with the highest value
if y is not None:
UpperCamelCase__ :List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ )
UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0]
return denorm_actions
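# For reference, normalize/de_normalize above are per-key standardization over the
# offline dataset statistics: x_norm = (x - mean[key]) / std[key] and its inverse
# x = x_norm * std[key] + mean[key]; actions are de-normalized before being returned.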
| 45 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :int = 13
UpperCamelCase__ :Optional[int] = 7
UpperCamelCase__ :Dict = True
UpperCamelCase__ :Dict = True
UpperCamelCase__ :str = True
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :Any = True
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :List[str] = 99
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Any = 32
UpperCamelCase__ :List[str] = 2
UpperCamelCase__ :int = 4
UpperCamelCase__ :List[str] = 0.1
UpperCamelCase__ :Union[str, Any] = 0.1
UpperCamelCase__ :Union[str, Any] = 5_12
UpperCamelCase__ :List[str] = 16
UpperCamelCase__ :str = 2
UpperCamelCase__ :Optional[int] = 0.02
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :Optional[int] = 4
UpperCamelCase__ :Optional[int] = """last"""
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :int = None
UpperCamelCase__ :Dict = 0
def __a ( self :int ):
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCamelCase__ :Union[str, Any] = None
if self.use_input_lengths:
UpperCamelCase__ :Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ :List[str] = None
if self.use_token_type_ids:
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase__ :int = None
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :List[str] = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ :List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ):
UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask]
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ):
UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ):
UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ )
UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ):
UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ )
UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ):
UpperCamelCase__ :Any = self.num_labels
UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = self.num_choices
UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :int = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self :Tuple ):
UpperCamelCase__ :str = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = config_and_inputs
UpperCamelCase__ :Optional[Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_snake_case : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : Tuple = False
def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = TFFlaubertModelTester(self )
UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 )
def __a ( self :int ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :str ):
UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCamelCase__ :Optional[int] = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0]
UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , lowerCamelCase__ )
# compare the actual values for a slice.
UpperCamelCase__ :str = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 45 |
def A ( lowercase__ : int ) -> bool:
if num < 0:
return False
UpperCamelCase__ :int = num
UpperCamelCase__ :int = 0
while num > 0:
UpperCamelCase__ :Optional[int] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
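# A de-obfuscated sketch of the digit-reversal check above (hypothetical name):
def is_palindrome_number_sketch(num: int) -> bool:
    if num < 0:
        return False
    original, reversed_num = num, 0
    while num > 0:
        reversed_num = reversed_num * 10 + num % 10
        num //= 10
    return original == reversed_num
# e.g. is_palindrome_number_sketch(121) -> True (0 -> 1 -> 12 -> 121); 123 -> False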
| 45 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
UpperCamelCase = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
UpperCamelCase = TaTokenizerFast
UpperCamelCase = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
UpperCamelCase = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
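# With this _LazyModule pattern, the heavy torch/tf/flax imports above only run when
# the corresponding attribute is first accessed, keeping the package import cheap;
# the tokenizer aliases are injected eagerly via extra_objects since they are shared with T5.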
| 45 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> bool:
return len(set(lowercase__ ) ) == len(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
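# e.g. the set comparison above gives A([1, 2, 3]) -> True and A([1, 2, 2]) -> False,
# since duplicates collapse in the set and shrink its length; this runs in O(n) time and space.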
| 45 | 1 |
from __future__ import annotations
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase__ :int = 0 ):
UpperCamelCase__ :List[str] = key
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[str] = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :int , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Dict = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :List[str] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Any , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Tuple = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :Optional[int] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 45 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
UpperCamelCase__ :List[str] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase__ , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(lowerCamelCase__ , """num_attention_heads""" ) )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str=13 , lowerCamelCase__ :Dict=64 , lowerCamelCase__ :Dict=3 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :Tuple=1 , lowerCamelCase__ :Tuple=16 , lowerCamelCase__ :int=[1_28, 2_56, 3_84] , lowerCamelCase__ :int=[4, 6, 8] , lowerCamelCase__ :Optional[Any]=[2, 3, 4] , lowerCamelCase__ :int=[16, 16, 16] , lowerCamelCase__ :List[Any]=0 , lowerCamelCase__ :List[Any]=[2, 2, 2] , lowerCamelCase__ :List[Any]=[2, 2, 2] , lowerCamelCase__ :Dict=0.02 , lowerCamelCase__ :Tuple=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :int=2 , ):
UpperCamelCase__ :Optional[Any] = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Optional[int] = image_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :List[Any] = kernel_size
UpperCamelCase__ :List[Any] = stride
UpperCamelCase__ :Optional[Any] = padding
UpperCamelCase__ :Optional[int] = hidden_sizes
UpperCamelCase__ :Tuple = num_attention_heads
UpperCamelCase__ :Optional[int] = depths
UpperCamelCase__ :Dict = key_dim
UpperCamelCase__ :Tuple = drop_path_rate
UpperCamelCase__ :Optional[Any] = patch_size
UpperCamelCase__ :Optional[Any] = attention_ratio
UpperCamelCase__ :List[str] = mlp_ratio
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[Any] = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase__ :int = is_training
UpperCamelCase__ :Union[str, Any] = use_labels
UpperCamelCase__ :Optional[int] = num_labels
UpperCamelCase__ :Any = initializer_range
def __a ( self :str ):
UpperCamelCase__ :Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :int = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase__ :Optional[Any] = self.get_config()
return config, pixel_values, labels
def __a ( self :Union[str, Any] ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def __a ( self :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Any = LevitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
UpperCamelCase__ :Tuple = (self.image_size, self.image_size)
UpperCamelCase__ , UpperCamelCase__ :List[Any] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ :List[str] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase__ :str = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
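# Worked example of the size arithmetic above with image_size=64, kernel_size=3,
# stride=2, padding=1: floor((64 + 2*1 - 3) / 2 + 1) = 32, so four stride-2 convs
# shrink the input 64 -> 32 -> 16 -> 8 -> 4 before the sequence length check.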
def __a ( self :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :Union[str, Any] = LevitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :Optional[Any] ):
UpperCamelCase__ :str = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Tuple = config_and_inputs
UpperCamelCase__ :Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : int = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
_snake_case : Union[str, Any] = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_snake_case : List[str] = False
_snake_case : Dict = False
_snake_case : Dict = False
_snake_case : List[Any] = False
_snake_case : Dict = False
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = LevitModelTester(self )
UpperCamelCase__ :int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self :Union[str, Any] ):
return
@unittest.skip(reason="""Levit does not use inputs_embeds""" )
def __a ( self :Union[str, Any] ):
pass
@unittest.skip(reason="""Levit does not support input and output embeddings""" )
def __a ( self :Tuple ):
pass
@unittest.skip(reason="""Levit does not output attentions""" )
def __a ( self :int ):
pass
def __a ( self :Union[str, Any] ):
UpperCamelCase__ , UpperCamelCase__ :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :List[str] = model_class(lowerCamelCase__ )
UpperCamelCase__ :Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ :Tuple = [*signature.parameters.keys()]
UpperCamelCase__ :Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def __a ( self :int ):
def check_hidden_states_output(lowerCamelCase__ :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :str ):
UpperCamelCase__ :Dict = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase__ :Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
UpperCamelCase__ :Optional[int] = outputs.hidden_states
UpperCamelCase__ :Dict = len(self.model_tester.depths ) + 1
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
UpperCamelCase__ :int = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ :Dict = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase__ :Optional[int] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase__ , UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :int = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ :Dict = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __a ( self :Dict ):
pass
def __a ( self :Dict , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str]=False ):
UpperCamelCase__ :int = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __a ( self :Dict ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def __a ( self :List[Any] ):
if not self.model_tester.is_training:
return
UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :str = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase__ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase__ :str = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
UpperCamelCase__ :str = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(**lowerCamelCase__ ).loss
loss.backward()
def __a ( self :Tuple ):
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase__ :Union[str, Any] = False
UpperCamelCase__ :Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase__ :Dict = model_class(lowerCamelCase__ )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase__ )
model.train()
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = model(**lowerCamelCase__ ).loss
loss.backward()
def __a ( self :Optional[int] ):
UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :Dict = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase__ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
UpperCamelCase__ :str = problem_type["""title"""]
UpperCamelCase__ :Tuple = problem_type["""num_labels"""]
UpperCamelCase__ :Dict = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
UpperCamelCase__ :List[str] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if problem_type["num_labels"] > 1:
UpperCamelCase__ :Tuple = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
UpperCamelCase__ :str = inputs["""labels"""].to(problem_type["""dtype"""] )
                # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                # they have the same size.", which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase__ ) as warning_list:
UpperCamelCase__ :Dict = model(**lowerCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def __a ( self :Any ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Any = LevitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A ( ) -> str:
UpperCamelCase__ :int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Any ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __a ( self :Tuple ):
UpperCamelCase__ :Any = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = self.default_image_processor
UpperCamelCase__ :Union[str, Any] = prepare_img()
UpperCamelCase__ :str = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase__ :List[str] = model(**lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCamelCase__ :Tuple = torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 45 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = a[pivot], a[left]  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
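def _demo_quick_sort_random() -> None:
    """Added sketch (not in the original script): checks the in-place sort against
    Python's built-in sorted() on a small random sample."""
    sample = [random.randint(0, 100) for _ in range(10)]
    expected = sorted(sample)
    quick_sort_random(sample, 0, len(sample))
    assert sample == expected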
if __name__ == "__main__":
main()
| 45 | 1 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Dict ):
UpperCamelCase__ :Union[str, Any] = parent
def __a ( self :Any ):
return {}
def A ( ) -> Union[str, Any]:
UpperCamelCase__ :Dict = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
UpperCamelCase__ :Tuple = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
return [html_string_a, html_string_a]
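# Added sketch (not part of the original tests): standalone use of the feature extractor on a
# single HTML string; the expected nodes/xpaths follow the same scheme as the expectations
# hard-coded in the test below.
def _demo_markuplm_feature_extractor() -> None:
    feature_extractor = MarkupLMFeatureExtractor()
    encoding = feature_extractor("<html><body><h1>Hello</h1></body></html>")
    assert encoding.nodes == [["Hello"]]
    assert encoding.xpaths == [["/html/body/h1"]]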
@require_bsa
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = MarkupLMFeatureExtractor if is_bsa_available() else None
def __a ( self :List[str] ):
UpperCamelCase__ :str = MarkupLMFeatureExtractionTester(self )
@property
def __a ( self :str ):
return self.feature_extract_tester.prepare_feat_extract_dict()
def __a ( self :Union[str, Any] ):
# Initialize feature_extractor
UpperCamelCase__ :Optional[int] = self.feature_extraction_class()
# Test not batched input
UpperCamelCase__ :List[str] = get_html_strings()[0]
UpperCamelCase__ :List[str] = feature_extractor(lowerCamelCase__ )
# fmt: off
UpperCamelCase__ :List[Any] = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]]
UpperCamelCase__ :List[Any] = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]]
# fmt: on
self.assertEqual(encoding.nodes , lowerCamelCase__ )
self.assertEqual(encoding.xpaths , lowerCamelCase__ )
# Test batched
UpperCamelCase__ :int = get_html_strings()
UpperCamelCase__ :str = feature_extractor(lowerCamelCase__ )
# fmt: off
UpperCamelCase__ :Union[str, Any] = expected_nodes + [["""My First Heading""", """My first paragraph."""]]
UpperCamelCase__ :List[Any] = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , lowerCamelCase__ )
self.assertEqual(encoding.xpaths , lowerCamelCase__ )
| 45 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
_snake_case : Tuple = """dinat"""
_snake_case : List[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase__ :int=4 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :List[Any]=64 , lowerCamelCase__ :Any=[3, 4, 6, 5] , lowerCamelCase__ :Tuple=[2, 4, 8, 16] , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase__ :Tuple=3.0 , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :Optional[Any]=0.02 , lowerCamelCase__ :Union[str, Any]=1e-5 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :str=None , **lowerCamelCase__ :List[Any] , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :Any = patch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :int = embed_dim
UpperCamelCase__ :Optional[Any] = depths
UpperCamelCase__ :Any = len(lowerCamelCase__ )
UpperCamelCase__ :str = num_heads
UpperCamelCase__ :Optional[int] = kernel_size
UpperCamelCase__ :Optional[int] = dilations
UpperCamelCase__ :Tuple = mlp_ratio
UpperCamelCase__ :Dict = qkv_bias
UpperCamelCase__ :List[str] = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = drop_path_rate
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :List[Any] = layer_norm_eps
UpperCamelCase__ :Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase__ :Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
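        # e.g. (added note) with the default embed_dim=64 and four stages this gives 64 * 2**3 = 512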
UpperCamelCase__ :Tuple = layer_scale_init_value
UpperCamelCase__ :Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ :List[str] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
| 45 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want one 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will then be used as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
UpperCamelCase = "facebook/wmt19-en-de"
UpperCamelCase = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
UpperCamelCase = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 45 |
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)
def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 45 | 1 |
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)
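def _demo_counts() -> None:
    """Added sketch (not in the original script): with 3 nodes there are
    catalan_number(3) = 5 binary search trees and 5 * 3! = 30 labeled binary trees."""
    assert catalan_number(3) == 5
    assert binary_tree_count(3) == 30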
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 45 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ):
UpperCamelCase__ :List[Any] = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :List[str] = image_size
UpperCamelCase__ :Dict = min_resolution
UpperCamelCase__ :List[str] = max_resolution
UpperCamelCase__ :str = do_resize
UpperCamelCase__ :int = size_divisor
UpperCamelCase__ :Optional[int] = do_rescale
def __a ( self :str ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None
def __a ( self :Dict ):
UpperCamelCase__ :Dict = GLPNImageProcessingTester(self )
@property
def __a ( self :List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) )
def __a ( self :Optional[int] ):
pass
def __a ( self :Tuple ):
# Initialize image_processing
UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :str ):
# Initialize image_processing
UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :Any ):
# Initialize image_processing
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
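# Added sketch (assumption, not part of the original tests): the divisibility asserted above
# comes from rounding each spatial dimension down to a multiple of size_divisor.
def _round_down_to_multiple(value: int, divisor: int) -> int:
    return (value // divisor) * divisor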
| 45 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ("""foo.json""",)] )
def __a ( self :Tuple , lowerCamelCase__ :Any ):
UpperCamelCase__ :List[str] = GenerationConfig(
do_sample=lowerCamelCase__ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ , config_name=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = GenerationConfig.from_pretrained(lowerCamelCase__ , config_name=lowerCamelCase__ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowerCamelCase__ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Union[str, Any] = AutoConfig.from_pretrained("""gpt2""" )
UpperCamelCase__ :List[Any] = GenerationConfig.from_model_config(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def __a ( self :Any ):
UpperCamelCase__ :Any = GenerationConfig()
UpperCamelCase__ :List[str] = {
"""max_new_tokens""": 10_24,
"""foo""": """bar""",
}
UpperCamelCase__ :Optional[int] = copy.deepcopy(lowerCamelCase__ )
UpperCamelCase__ :int = generation_config.update(**lowerCamelCase__ )
# update_kwargs was not modified (no side effects)
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowerCamelCase__ , {"""foo""": """bar"""} )
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = GenerationConfig()
UpperCamelCase__ :Optional[Any] = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = GenerationConfig.from_pretrained(lowerCamelCase__ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
UpperCamelCase__ :Optional[int] = GenerationConfig.from_model_config(lowerCamelCase__ )
assert not hasattr(lowerCamelCase__ , """foo""" ) # no new kwargs should be initialized if from config
def __a ( self :Dict ):
UpperCamelCase__ :str = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , lowerCamelCase__ )
self.assertEqual(default_config.num_beams , 1 )
UpperCamelCase__ :Dict = GenerationConfig(
do_sample=lowerCamelCase__ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , lowerCamelCase__ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[str] = GenerationConfig.from_pretrained(lowerCamelCase__ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , lowerCamelCase__ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
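# Added sketch (not part of the original tests): the override pattern the test above relies
# on -- kwargs passed to from_pretrained take precedence over the values saved on disk.
def _demo_from_pretrained_override() -> None:
    with tempfile.TemporaryDirectory() as tmp_dir:
        config = GenerationConfig(temperature=0.7)
        config.save_pretrained(tmp_dir)
        loaded = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        assert loaded.temperature == 1.0  # the kwarg wins over the saved 0.7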
@is_staging_test
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __a ( cls :List[str] ):
UpperCamelCase__ :List[str] = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def __a ( cls :Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def __a ( self :Dict ):
UpperCamelCase__ :Tuple = GenerationConfig(
do_sample=lowerCamelCase__ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
UpperCamelCase__ :List[Any] = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase__ , repo_id="""test-generation-config""" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
UpperCamelCase__ :List[str] = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def __a ( self :List[Any] ):
UpperCamelCase__ :List[Any] = GenerationConfig(
do_sample=lowerCamelCase__ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
UpperCamelCase__ :Any = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase__ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
UpperCamelCase__ :Dict = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
| 45 |
import math
def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the identity log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
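def _demo_res() -> None:
    """Added sketch (not in the original script): 2^10 = 1024 beats 3^6 = 729, and the log
    comparison agrees, since 10 * log10(2) > 6 * log10(3)."""
    assert res(2, 10) > res(3, 6)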
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase = "Enter the base and the power separated by a comma: "
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase = res(xa, ya)
UpperCamelCase = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
| 45 | 1 |
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
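def _demo_change_brightness() -> None:
    """Added sketch (not in the original script): brightness(c) reduces to c + level, so every
    pixel of a uniform gray image shifts by the given level."""
    gray = Image.new("L", (2, 2), color=128)
    brighter = change_brightness(gray, 50)
    assert list(brighter.getdata()) == [178, 178, 178, 178]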
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 45 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :int = 13
UpperCamelCase__ :Optional[int] = 7
UpperCamelCase__ :Dict = True
UpperCamelCase__ :Dict = True
UpperCamelCase__ :str = True
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :Any = True
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :List[str] = 99
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Any = 32
UpperCamelCase__ :List[str] = 2
UpperCamelCase__ :int = 4
UpperCamelCase__ :List[str] = 0.1
UpperCamelCase__ :Union[str, Any] = 0.1
UpperCamelCase__ :Union[str, Any] = 5_12
UpperCamelCase__ :List[str] = 16
UpperCamelCase__ :str = 2
UpperCamelCase__ :Optional[int] = 0.02
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :Optional[int] = 4
UpperCamelCase__ :Optional[int] = """last"""
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :int = None
UpperCamelCase__ :Dict = 0
def __a ( self :int ):
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCamelCase__ :Union[str, Any] = None
if self.use_input_lengths:
UpperCamelCase__ :Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ :List[str] = None
if self.use_token_type_ids:
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase__ :int = None
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :List[str] = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ :List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ):
UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask]
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ):
UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ):
UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ )
UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ):
UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ )
UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ):
UpperCamelCase__ :Any = self.num_labels
UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = self.num_choices
UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :int = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self :Tuple ):
UpperCamelCase__ :str = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = config_and_inputs
UpperCamelCase__ :Optional[Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_snake_case : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : Tuple = False
def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = TFFlaubertModelTester(self )
UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 )
def __a ( self :int ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :str ):
UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCamelCase__ :Optional[int] = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0]
UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , lowerCamelCase__ )
# compare the actual values for a slice.
UpperCamelCase__ :str = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 45 | 1 |
import json
import sys
def format_json_to_md(input_json_file: str, output_md_file: str) -> None:
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
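def _demo_format_json_to_md() -> None:
    """Added sketch (hypothetical data, not in the original script): the expected input is a
    mapping of benchmark name -> metric name -> {"new": ..., "old": ..., "diff": ...}."""
    import os
    import tempfile
    results = {"benchmarks/inference": {"latency_ms": {"new": 12.5, "old": 13.1, "diff": -0.6}}}
    with tempfile.TemporaryDirectory() as tmp:
        in_path = os.path.join(tmp, "results.json")
        out_path = os.path.join(tmp, "results.md")
        with open(in_path, "w", encoding="utf-8") as f:
            json.dump(results, f)
        format_json_to_md(in_path, out_path)
        with open(out_path, encoding="utf-8") as f:
            assert "### Benchmark: inference" in f.read()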
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 45 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :List[Any] ):
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :Any = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :str = generator.manual_seed(0 )
UpperCamelCase__ :str = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = """cyberpunk 2077"""
UpperCamelCase__ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :str = torch.manual_seed(0 )
UpperCamelCase__ :Dict = pipe.dual_guided(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """
UpperCamelCase__ :List[str] = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.text_to_image(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :Optional[int] = pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images
UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 45 | 1 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] ):
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
UpperCamelCase__ :int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase__ :int = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase__ :int = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowerCamelCase__ , f"""{split}.{field}""" ) , """w""" ) as f:
f.write(lowerCamelCase__ )
def __a ( self :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :str = "pytorch" ):
UpperCamelCase__ :Union[str, Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase__ :Optional[Any] = os.path.join(lowerCamelCase__ , """output""" )
UpperCamelCase__ :str = os.path.join(lowerCamelCase__ , """data""" )
self._create_dummy_data(data_dir=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase__ :List[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowerCamelCase__ , env=self.get_env() )
UpperCamelCase__ :Optional[int] = os.path.join(lowerCamelCase__ , """metrics.json""" )
with open(lowerCamelCase__ ) as f:
UpperCamelCase__ :List[str] = json.load(lowerCamelCase__ )
return result
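    # Added note (shape inferred from the assertions below, not stated in the original):
    # metrics.json maps each split to a list of dicts, e.g. {"test": [{"test_avg_em": 0.25}]}.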
@require_torch_gpu
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :List[Any] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def __a ( self :Optional[Any] ):
UpperCamelCase__ :List[str] = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def __a ( self :str ):
UpperCamelCase__ :Any = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 45 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ):
UpperCamelCase__ :Any = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :Union[str, Any] = patch_size
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :int = use_token_type_ids
UpperCamelCase__ :int = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :List[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :Union[str, Any] = type_sequence_label_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Tuple = shape_size
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :str = num_choices
UpperCamelCase__ :Tuple = scope
UpperCamelCase__ :str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase__ :List[str] = text_seq_length
UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1
UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length
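        # e.g. (added note) with image_size=4 and patch_size=2 above: (4 // 2) ** 2 + 1 = 5
        # patch tokens, so seq_length = text_seq_length + 5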
def __a ( self :Tuple ):
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase__ :str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase__ :List[str] = bbox[i, j, 3]
UpperCamelCase__ :Optional[int] = bbox[i, j, 1]
UpperCamelCase__ :Optional[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase__ :Tuple = bbox[i, j, 2]
UpperCamelCase__ :Optional[Any] = bbox[i, j, 0]
UpperCamelCase__ :List[str] = tmp_coordinate
UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Any = None
if self.use_input_mask:
UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase__ :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase__ :Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)
        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    def test_for_token_classification(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)

    def test_for_question_answering(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
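
# Illustrative sketch (added for exposition, not part of the original test file):
# why the expected hidden-state shape above is (1, 199, 768). layoutlmv3-base uses
# 224x224 images and 16x16 patches, so the visual sequence contributes
# (224 // 16) ** 2 + 1 = 197 tokens (patches + CLS), and the 2 text token ids fed
# above bring the total sequence length to 199.
def _expected_seq_length(text_tokens, image_size=224, patch_size=16):
    image_tokens = (image_size // patch_size) ** 2 + 1  # patches + CLS token
    return text_tokens + image_tokens


assert _expected_seq_length(2) == 199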
| 45 | 1 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
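
# Illustration (added commentary, not part of the original module): the general
# deprecation-shim pattern used above, with hypothetical stand-in classes. The old
# name stays importable but emits a FutureWarning and otherwise behaves exactly
# like its replacement.
class _NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size


class _OldProcessor(_NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn("_OldProcessor is deprecated; use _NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)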
| 45 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
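
# Quick arithmetic check (added for exposition) of the linear scaling rule used
# above: absolute_lr = base_lr * total_batch_size / 256. The numbers below are
# illustrative, not defaults of this script.
def _scaled_lr(base_lr, per_device_batch, grad_accum, world_size):
    total_batch = per_device_batch * grad_accum * world_size
    return base_lr * total_batch / 256


assert abs(_scaled_lr(1e-3, 64, 8, 8) - 0.016) < 1e-12  # effective batch 4096 -> lr 1.6e-2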
if __name__ == "__main__":
main()
| 45 | 1 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "PretrainedConfig":
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
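
# Usage sketch (added for exposition): composing the config from two sub-configs
# and round-tripping through to_dict(). ViT/GPT-2 are example choices, not
# requirements of the class. Guarded so it only runs when this file is executed
# directly, to avoid import cycles during normal library use.
if __name__ == "__main__":
    from transformers import GPT2Config, ViTConfig

    example = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
    assert example.decoder.is_decoder and example.decoder.add_cross_attention
    assert example.to_dict()["model_type"] == "vision-encoder-decoder"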
| 45 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
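
# Worked example (added for exposition): below a ceiling of 100, the longest run
# of consecutive primes that sums to a prime is 2 + 3 + 5 + 7 + 11 + 13 = 41.
assert prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]
assert solution(100) == 41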
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45 | 1 |
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
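
# Quick round-trip check (added for exposition): encoding then decoding recovers
# the original message.
assert encrypt("SOS") == "... --- ..."
assert decrypt(encrypt("HELLO WORLD")) == "HELLO WORLD"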
if __name__ == "__main__":
main()
| 45 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1,
        weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
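
# Toy sketch (added for exposition; a loose analogy, not the real network) of the
# invariant that create_and_check_decoder_model_past_large_inputs verifies above:
# running over [prefix + new tokens] in one pass must match running the prefix,
# caching state, then feeding only the new tokens against the cache.
import torch as _torch


def _toy_forward(x, past_sum=None):
    past = 0.0 if past_sum is None else past_sum
    out = past + _torch.cumsum(x, dim=-1)
    return out, out[..., -1]  # ("hidden states", cache)


_full, _ = _toy_forward(_torch.tensor([1.0, 2.0, 3.0, 4.0]))
_prefix_out, _cache = _toy_forward(_torch.tensor([1.0, 2.0]))
_incremental, _ = _toy_forward(_torch.tensor([3.0, 4.0]), past_sum=_cache)
assert _torch.allclose(_full[-2:], _incremental)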
| 45 | 1 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map))

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
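
# Quick check (added for exposition) of the squareplus map used by the classes
# above: (x + sqrt(x^2 + 4)) / 2 maps all of R to strictly positive values, which
# is why raw network outputs can parameterize scales and total counts.
_x = torch.tensor([-5.0, 0.0, 5.0])
_y = DistributionOutput.squareplus(_x)
assert torch.all(_y > 0)
assert torch.allclose(DistributionOutput.squareplus(torch.tensor(0.0)), torch.tensor(1.0))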
| 45 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset dictionary into (features, target)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the iris dataset and split it into train/test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true")
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 45 | 1 |
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}")

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :int ):
UpperCamelCase__ :Union[str, Any] = engine
def __a ( self :str , lowerCamelCase__ :List[str] , **lowerCamelCase__ :Optional[int] ):
# runs backpropagation and handles mixed precision
self.engine.backward(lowerCamelCase__ , **lowerCamelCase__ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :Tuple ):
super().__init__(lowerCamelCase__ , device_placement=lowerCamelCase__ , scaler=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = hasattr(self.optimizer , """overflow""" )
def __a ( self :int , lowerCamelCase__ :str=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __a ( self :List[Any] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __a ( self :List[Any] ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict=0.001 , lowerCamelCase__ :Optional[int]=0 , **lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :Any = params
UpperCamelCase__ :Tuple = lr
UpperCamelCase__ :Optional[int] = weight_decay
UpperCamelCase__ :Optional[Any] = kwargs
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :int , lowerCamelCase__ :int=None , lowerCamelCase__ :List[str]=0 , **lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Optional[Any] = optimizer
UpperCamelCase__ :List[Any] = total_num_steps
UpperCamelCase__ :Any = warmup_num_steps
UpperCamelCase__ :Any = kwargs
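# A minimal sketch of how the two placeholder classes above are typically constructed
# (the names DummyOptim/DummyScheduler and the model are assumptions; the real optimizer
# and scheduler are built later by DeepSpeed from its config):
# optimizer = DummyOptim(params=model.parameters(), lr=1e-3, weight_decay=0.0)
# scheduler = DummyScheduler(optimizer, total_num_steps=1_000, warmup_num_steps=100)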
| 45 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A ( lowercase__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase__ :Union[str, Any] = {}
UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""]
UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
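# A hedged example invocation (the script file name and argument values are assumptions;
# the argument names come from the args.* attributes used above):
# python pretokenizing.py --tokenizer_dir codeparrot/codeparrot \
#     --dataset_name my-org/my-raw-dataset --tokenized_data_repo my-org/my-tokenized-dataset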
| 45 | 1 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Union[str, Any] , ):
UpperCamelCase__ :Tuple = parent
UpperCamelCase__ :str = 13
UpperCamelCase__ :Union[str, Any] = 7
UpperCamelCase__ :List[Any] = 30
UpperCamelCase__ :int = self.seq_length + self.mem_len
UpperCamelCase__ :Tuple = 15
UpperCamelCase__ :int = True
UpperCamelCase__ :int = True
UpperCamelCase__ :Union[str, Any] = 99
UpperCamelCase__ :Any = [10, 50, 80]
UpperCamelCase__ :List[str] = 32
UpperCamelCase__ :Optional[Any] = 32
UpperCamelCase__ :int = 4
UpperCamelCase__ :Optional[int] = 8
UpperCamelCase__ :Tuple = 1_28
UpperCamelCase__ :List[Any] = 2
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :Dict = None
UpperCamelCase__ :List[Any] = 1
UpperCamelCase__ :Any = 0
UpperCamelCase__ :List[str] = 3
UpperCamelCase__ :Any = self.vocab_size - 1
UpperCamelCase__ :Optional[int] = 0.01
def __a ( self :Optional[Any] ):
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Any = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :int = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __a ( self :Optional[int] ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Any = TFTransfoXLModel(lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ :List[str] = model(lowerCamelCase__ ).to_tuple()
UpperCamelCase__ :int = {"""input_ids""": input_ids_a, """mems""": mems_a}
UpperCamelCase__ , UpperCamelCase__ :str = model(lowerCamelCase__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __a ( self :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict ):
UpperCamelCase__ :Union[str, Any] = TFTransfoXLLMHeadModel(lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ :str = model(lowerCamelCase__ ).to_tuple()
UpperCamelCase__ :Tuple = {"""input_ids""": input_ids_a, """labels""": lm_labels}
UpperCamelCase__ , UpperCamelCase__ :Any = model(lowerCamelCase__ ).to_tuple()
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
UpperCamelCase__ :str = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __a ( self :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :Union[str, Any] = TFTransfoXLForSequenceClassification(lowerCamelCase__ )
UpperCamelCase__ :Dict = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :List[Any] ):
UpperCamelCase__ :Dict = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = config_and_inputs
UpperCamelCase__ :List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : str = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_snake_case : List[str] = () if is_tf_available() else ()
_snake_case : List[str] = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_snake_case : Optional[Any] = False
_snake_case : Any = False
_snake_case : Tuple = False
_snake_case : List[Any] = False
def __a ( self :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :str ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __a ( self :List[str] ):
UpperCamelCase__ :Any = TFTransfoXLModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , d_embed=37 )
def __a ( self :str ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
self.model_tester.set_seed()
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*lowerCamelCase__ )
def __a ( self :str ):
self.model_tester.set_seed()
UpperCamelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCamelCase__ )
def __a ( self :Dict ):
UpperCamelCase__ , UpperCamelCase__ :str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ :int = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
UpperCamelCase__ :Dict = model_class(lowerCamelCase__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
UpperCamelCase__ :Optional[Any] = model.get_output_embeddings()
assert isinstance(lowerCamelCase__ , tf.keras.layers.Layer )
UpperCamelCase__ :List[Any] = model.get_bias()
assert name is None
else:
UpperCamelCase__ :Union[str, Any] = model.get_output_embeddings()
assert x is None
UpperCamelCase__ :int = model.get_bias()
assert name is None
def __a ( self :int ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __a ( self :int ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :str = TFTransfoXLModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __a ( self :Union[str, Any] ):
pass
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __a ( self :str ):
UpperCamelCase__ :int = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCamelCase__ :Tuple = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
UpperCamelCase__ :str = model.generate(lowerCamelCase__ , max_length=2_00 , do_sample=lowerCamelCase__ )
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase__ )
| 45 |
def A ( lowercase__ : int ) -> Optional[Any]:
stooge(lowercase__ , 0 , len(lowercase__ ) - 1 )
return arr
def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : str ) -> List[str]:
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
UpperCamelCase__ , UpperCamelCase__ :List[str] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        UpperCamelCase__ :Optional[int] = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowercase__ , i + t , (lowercase__) )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
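    # Worked example (a sketch): for [2, 4, 5, 3, 1] with i=0, h=4 the endpoints 2 and 1
    # are swapped, then with t = 1 the three recursive calls sort indices 0..3, 1..4 and
    # 0..3 again, leaving [1, 2, 3, 4, 5].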
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 45 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def A ( lowercase__ : Optional[Any] , lowercase__ : str=7 ) -> List[str]:
UpperCamelCase__ :List[Any] = None
if token is not None:
UpperCamelCase__ :Optional[int] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
UpperCamelCase__ :str = """636036"""
UpperCamelCase__ :Tuple = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
UpperCamelCase__ :List[str] = requests.get(lowercase__ , headers=lowercase__ ).json()
return result["workflow_runs"]
def A ( lowercase__ : Optional[Any] ) -> Optional[Any]:
UpperCamelCase__ :Any = get_daily_ci_runs(lowercase__ )
UpperCamelCase__ :Optional[Any] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
UpperCamelCase__ :Tuple = workflow_run["""id"""]
break
return workflow_run_id
def A ( lowercase__ : Union[str, Any] , lowercase__ : int , lowercase__ : int ) -> List[str]:
UpperCamelCase__ :Any = get_last_daily_ci_runs(lowercase__ )
if workflow_run_id is not None:
UpperCamelCase__ :int = get_artifacts_links(worflow_run_id=lowercase__ , token=lowercase__ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
UpperCamelCase__ :int = artifacts_links[artifact_name]
download_artifact(
artifact_name=lowercase__ , artifact_url=lowercase__ , output_dir=lowercase__ , token=lowercase__ )
def A ( lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : List[str] ) -> Optional[int]:
get_last_daily_ci_artifacts(lowercase__ , lowercase__ , lowercase__ )
UpperCamelCase__ :str = {}
for artifact_name in artifact_names:
UpperCamelCase__ :List[Any] = os.path.join(lowercase__ , f"""{artifact_name}.zip""" )
if os.path.isfile(lowercase__ ):
UpperCamelCase__ :Optional[Any] = {}
with zipfile.ZipFile(lowercase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowercase__ ):
# read the file
with z.open(lowercase__ ) as f:
UpperCamelCase__ :str = f.read().decode("""UTF-8""" )
return results
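# A hedged usage sketch (the top-level function is obfuscated above as `A`; the token,
# artifact names and output directory are made up for illustration):
# results = A(["ci_results"], "outputs", os.environ["GITHUB_TOKEN"])
# # -> a dict keyed by artifact name, mapping to the decoded contents of each zipped file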
| 45 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
UpperCamelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCamelCase = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]:
UpperCamelCase__ :str = SavedModel()
UpperCamelCase__ :List[str] = []
with open(os.path.join(lowercase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
UpperCamelCase__ :str = json.load(lowercase__ )["""opsets"""]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(lowercase__ )] )
with open(lowercase__ , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
UpperCamelCase__ :Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
UpperCamelCase__ :Union[str, Any] = sorted(lowercase__ )
UpperCamelCase__ :List[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowercase__ )
if strict and len(lowercase__ ) > 0:
        raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + """\n""".join(incompatible_ops ) )
elif len(lowercase__ ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*lowercase__ , sep="""\n""" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
UpperCamelCase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
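# Example invocation (the script file name is an assumption; the flags match the
# argparse definitions above):
# python check_tf_ops.py --saved_model_path path/to/saved_model.pb --opset 12 --strict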
| 45 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def A ( lowercase__ : str , lowercase__ : Optional[int] ) -> str:
UpperCamelCase__ :Any = int(lowercase__ )
assert noofclusters < len(lowercase__ )
# Find out the dimensionality
UpperCamelCase__ :str = len(vectors[0] )
# Will help select random centroids from among the available vectors
UpperCamelCase__ :Dict = list(range(len(lowercase__ ) ) )
shuffle(lowercase__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
UpperCamelCase__ :List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
UpperCamelCase__ :Dict = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
UpperCamelCase__ :Optional[Any] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
UpperCamelCase__ :Optional[Any] = tf.placeholder("""float64""" , [dim] )
UpperCamelCase__ :List[Any] = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase__ , lowercase__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
UpperCamelCase__ :str = [tf.Variable(0 ) for i in range(len(lowercase__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
UpperCamelCase__ :Union[str, Any] = tf.placeholder("""int32""" )
UpperCamelCase__ :str = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase__ , lowercase__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
UpperCamelCase__ :int = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
UpperCamelCase__ :List[str] = tf.reduce_mean(lowercase__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
UpperCamelCase__ :str = tf.placeholder("""float""" , [dim] )
UpperCamelCase__ :Any = tf.placeholder("""float""" , [dim] )
UpperCamelCase__ :List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase__ , lowercase__ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
UpperCamelCase__ :Dict = tf.placeholder("""float""" , [noofclusters] )
UpperCamelCase__ :Optional[int] = tf.argmin(lowercase__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
UpperCamelCase__ :Union[str, Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
UpperCamelCase__ :Dict = 100
for _ in range(lowercase__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase__ ) ):
UpperCamelCase__ :Optional[Any] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
UpperCamelCase__ :str = [
sess.run(lowercase__ , feed_dict={va: vect, va: sess.run(lowercase__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
UpperCamelCase__ :Optional[int] = sess.run(
lowercase__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase__ ):
# Collect all the vectors assigned to this cluster
UpperCamelCase__ :Any = [
vectors[i]
for i in range(len(lowercase__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
UpperCamelCase__ :Optional[int] = sess.run(
lowercase__ , feed_dict={mean_input: array(lowercase__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
UpperCamelCase__ :Optional[Any] = sess.run(lowercase__ )
UpperCamelCase__ :Dict = sess.run(lowercase__ )
return centroids, assignments
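# A hedged usage sketch (the function is defined above as `A`; vectors is a list of
# equal-length points, and the cluster count must be smaller than len(vectors)):
# centroids, assignments = A([[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.5, 8.5]], 2)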
| 45 |
from __future__ import annotations
def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]:
UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
        # Frequencies of letters in the English language (how often each letter appears)
UpperCamelCase__ :Optional[Any] = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
UpperCamelCase__ :Optional[int] = frequencies_dict
if not case_sensitive:
UpperCamelCase__ :int = ciphertext.lower()
# Chi squared statistic values
UpperCamelCase__ :dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(lowercase__ ) ):
UpperCamelCase__ :int = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
UpperCamelCase__ :Optional[int] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
UpperCamelCase__ :Optional[int] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ )
                # Get the expected number of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ )
                    # Get the expected number of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
UpperCamelCase__ :Union[str, Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
UpperCamelCase__ :int = min(
lowercase__ , key=lowercase__ , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) :Tuple = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
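# A hedged example of the return shape (most likely shift, its chi-squared value, the
# decoded text); "ovd pz aol jhlzhy jpwoly" is "how is the caesar cipher" shifted by 7,
# so the call below should recover shift 7 and the plaintext:
# shift, chi_squared, decoded = A("ovd pz aol jhlzhy jpwoly")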
| 45 | 1 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> bool:
return len(set(lowercase__ ) ) == len(lowercase__ )
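# Examples (a sketch): A([1, 2, 3]) is True, while A([1, 2, 2]) is False because the
# duplicate collapses when the list is converted to a set.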
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ):
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 45 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def A ( lowercase__ : str , lowercase__ : str ) -> int:
UpperCamelCase__ :List[Any] = RobertaPreLayerNormConfig.from_pretrained(
lowercase__ , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
UpperCamelCase__ :Union[str, Any] = torch.load(hf_hub_download(repo_id=lowercase__ , filename="""pytorch_model.bin""" ) )
UpperCamelCase__ :List[str] = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
UpperCamelCase__ :Tuple = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
UpperCamelCase__ :Dict = tensor_value
UpperCamelCase__ :Dict = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowercase__ , config=lowercase__ , state_dict=lowercase__ )
model.save_pretrained(lowercase__ )
# convert tokenizer
UpperCamelCase__ :Tuple = AutoTokenizer.from_pretrained(lowercase__ )
tokenizer.save_pretrained(lowercase__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 45 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase = get_tests_dir("fixtures")
UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCamelCase = get_tests_dir("fixtures/dummy-config.json")
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[int] = 0
def __a ( self :str ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ :List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase__ :Tuple = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def __a ( self :List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def __a ( self :int ):
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __a ( self :Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def __a ( self :Dict ):
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self :Optional[int] ):
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[int] = True
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
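# The registration pattern the tests above exercise, as a hedged standalone sketch
# (the local directory is hypothetical):
# AutoConfig.register("custom", CustomConfig)
# AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
# feature_extractor = AutoFeatureExtractor.from_pretrained("path/to/dir_with_custom_config")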
| 45 | 1 |
from __future__ import annotations
UpperCamelCase = 8.988E9 # units = N * m^s * C^-2
def A ( lowercase__ : float , lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]:
UpperCamelCase__ :Optional[int] = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if distance < 0:
raise ValueError("""Distance cannot be negative""" )
if force == 0:
UpperCamelCase__ :Any = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
UpperCamelCase__ :List[Any] = abs(lowercase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
UpperCamelCase__ :int = abs(lowercase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
UpperCamelCase__ :Optional[Any] = (COULOMBS_CONSTANT * charge_product / abs(lowercase__ )) ** 0.5
return {"distance": distance}
raise ValueError("""Exactly one argument must be 0""" )
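# Worked example (positional arguments: force, charge1, charge2, distance):
# A(0, 3, 5, 2000) -> {"force": 33705.0}, since 8.988e9 * (3 * 5) / 2000**2 == 33705.0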
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ):
super().__init__()
UpperCamelCase__ :Tuple = value_function
UpperCamelCase__ :Optional[int] = unet
UpperCamelCase__ :List[str] = scheduler
UpperCamelCase__ :Dict = env
UpperCamelCase__ :Dict = env.get_dataset()
UpperCamelCase__ :Union[str, Any] = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase__ :Any = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase__ :List[Any] = env.observation_space.shape[0]
UpperCamelCase__ :List[str] = env.action_space.shape[0]
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ):
return (x_in - self.means[key]) / self.stds[key]
def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
return x_in * self.stds[key] + self.means[key]
def __a ( self :Any , lowerCamelCase__ :int ):
if type(lowerCamelCase__ ) is dict:
return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
for key, val in cond.items():
UpperCamelCase__ :str = val.clone()
return x_in
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Any = x.shape[0]
UpperCamelCase__ :List[Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ )
UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance )
UpperCamelCase__ :Dict = model_std * grad
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Dict = x.detach()
UpperCamelCase__ :int = x + scale * grad
UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ )
return x, y
def __call__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ):
# normalize the observations and create batch dimension
UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" )
UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 )
UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )}
UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device )
UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
UpperCamelCase__ :Dict = x[sorted_idx]
UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim]
UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" )
# select the action with the highest value
if y is not None:
UpperCamelCase__ :List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ )
UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0]
return denorm_actions
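# A hedged usage sketch (keyword names are assumptions inferred from the defaults
# 64, 32, 2 and 0.1 in __call__ above; `obs` is a hypothetical env observation):
# denorm_actions = pipeline(obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1)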
| 45 | 1 |
def A ( lowercase__ : int ) -> bool:
return str(lowercase__ ) == str(lowercase__ )[::-1]
def A ( lowercase__ : int ) -> int:
return int(lowercase__ ) + int(str(lowercase__ )[::-1] )
def A ( lowercase__ : int = 1_0000 ) -> int:
UpperCamelCase__ :int = []
for num in range(1 , lowercase__ ):
UpperCamelCase__ :int = 0
UpperCamelCase__ :Optional[Any] = num
while iterations < 50:
UpperCamelCase__ :List[Any] = sum_reverse(lowercase__ )
iterations += 1
if is_palindrome(lowercase__ ):
break
else:
lychrel_nums.append(lowercase__ )
return len(lowercase__ )
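# For the default limit of 10000 this counts Lychrel candidates such as 196, which never
# reaches a palindrome within the 50-iteration cap; the expected count for Project
# Euler 55 is 249.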
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45 |
def A ( lowercase__ : int ) -> bool:
if num < 0:
return False
UpperCamelCase__ :int = num
UpperCamelCase__ :int = 0
while num > 0:
UpperCamelCase__ :Optional[int] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
UpperCamelCase = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> bool:
return len(set(lowercase__ ) ) == len(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
def A ( lowercase__ : list[int] , lowercase__ : list[int] ) -> None:
UpperCamelCase__ :Union[str, Any] = len(lowercase__ )
print("""The following activities are selected:""" )
# The first activity is always selected
UpperCamelCase__ :int = 0
print(lowercase__ , end=""",""" )
# Consider rest of the activities
for j in range(lowercase__ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowercase__ , end=""",""" )
UpperCamelCase__ :str = j
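# With the sample arrays below, the greedy scan keeps activities 0, 1, 3 and 4 (each
# selected start time is >= the previous finish time), so the script prints "0,1,3,4,".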
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = [1, 3, 0, 5, 8, 5]
UpperCamelCase = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 45 |
from __future__ import annotations
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase__ :int = 0 ):
UpperCamelCase__ :List[str] = key
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[str] = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :int , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Dict = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :List[str] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Any , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Tuple = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :Optional[int] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 45 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45 |
import random
def A ( lowercase__ : Dict , lowercase__ : str , lowercase__ : Optional[Any] ) -> int:
UpperCamelCase__ :List[Any] = a[left_index]
UpperCamelCase__ :Dict = left_index + 1
for j in range(left_index + 1 , lowercase__ ):
if a[j] < pivot:
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = a[i], a[j]
i += 1
UpperCamelCase__ , UpperCamelCase__ :Tuple = a[i - 1], a[left_index]
return i - 1
def A ( lowercase__ : Tuple , lowercase__ : Optional[int] , lowercase__ : Any ) -> Optional[int]:
if left < right:
UpperCamelCase__ :List[Any] = random.randint(lowercase__ , right - 1 )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
UpperCamelCase__ :int = partition(lowercase__ , lowercase__ , lowercase__ )
quick_sort_random(
lowercase__ , lowercase__ , lowercase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowercase__ , pivot_index + 1 , lowercase__ ) # recursive quicksort to the right of the pivot point
def A ( ) -> List[Any]:
UpperCamelCase__ :str = input("""Enter numbers separated by a comma:\n""" ).strip()
UpperCamelCase__ :int = [int(lowercase__ ) for item in user_input.split(""",""" )]
quick_sort_random(lowercase__ , 0 , len(lowercase__ ) )
print(lowercase__ )
if __name__ == "__main__":
main()
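# Worked example (a sketch): quick_sort_random([3, 1, 2], 0, 3) picks random pivots,
# partitions around them in place, and leaves the list as [1, 2, 3].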
| 45 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
_snake_case : Tuple = """dinat"""
_snake_case : List[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase__ :int=4 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :List[Any]=64 , lowerCamelCase__ :Any=[3, 4, 6, 5] , lowerCamelCase__ :Tuple=[2, 4, 8, 16] , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase__ :Tuple=3.0 , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :Optional[Any]=0.02 , lowerCamelCase__ :Union[str, Any]=1e-5 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :str=None , **lowerCamelCase__ :List[Any] , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :Any = patch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :int = embed_dim
UpperCamelCase__ :Optional[Any] = depths
UpperCamelCase__ :Any = len(lowerCamelCase__ )
UpperCamelCase__ :str = num_heads
UpperCamelCase__ :Optional[int] = kernel_size
UpperCamelCase__ :Optional[int] = dilations
UpperCamelCase__ :Tuple = mlp_ratio
UpperCamelCase__ :Dict = qkv_bias
UpperCamelCase__ :List[str] = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = drop_path_rate
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :List[Any] = layer_norm_eps
UpperCamelCase__ :Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase__ :Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
UpperCamelCase__ :Tuple = layer_scale_init_value
UpperCamelCase__ :Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ :List[str] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
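# --- Added worked example (not in the original file) ---
# With the defaults above (embed_dim=64 and depths=[3, 4, 6, 5], i.e. 4 stages),
# the channel dimension after the last stage is 64 * 2 ** (4 - 1) = 512, and
# stage_names becomes ["stem", "stage1", "stage2", "stage3", "stage4"].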
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
_snake_case : str = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
_snake_case : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __a ( self :int ):
UpperCamelCase__ :List[Any] = self.task_name.lower()
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : List[str] = """train"""
_snake_case : Optional[int] = """dev"""
_snake_case : Union[str, Any] = """test"""
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : GlueDataTrainingArguments
_snake_case : str
_snake_case : List[InputFeatures]
def __init__( self :Any , lowerCamelCase__ :GlueDataTrainingArguments , lowerCamelCase__ :PreTrainedTokenizerBase , lowerCamelCase__ :Optional[int] = None , lowerCamelCase__ :Union[str, Split] = Split.train , lowerCamelCase__ :Optional[str] = None , ):
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , lowerCamelCase__ , )
UpperCamelCase__ :List[Any] = args
UpperCamelCase__ :Optional[int] = glue_processors[args.task_name]()
UpperCamelCase__ :List[str] = glue_output_modes[args.task_name]
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
try:
UpperCamelCase__ :Tuple = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
UpperCamelCase__ :str = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
UpperCamelCase__ :str = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase__ , UpperCamelCase__ :str = label_list[2], label_list[1]
UpperCamelCase__ :Optional[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase__ :Optional[Any] = cached_features_file + """.lock"""
with FileLock(lowerCamelCase__ ):
if os.path.exists(lowerCamelCase__ ) and not args.overwrite_cache:
UpperCamelCase__ :Dict = time.time()
UpperCamelCase__ :List[str] = torch.load(lowerCamelCase__ )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
UpperCamelCase__ :Any = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
UpperCamelCase__ :List[str] = self.processor.get_test_examples(args.data_dir )
else:
UpperCamelCase__ :int = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
UpperCamelCase__ :str = examples[:limit_length]
UpperCamelCase__ :Tuple = glue_convert_examples_to_features(
lowerCamelCase__ , lowerCamelCase__ , max_length=args.max_seq_length , label_list=lowerCamelCase__ , output_mode=self.output_mode , )
UpperCamelCase__ :Union[str, Any] = time.time()
torch.save(self.features , lowerCamelCase__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self :Dict ):
return len(self.features )
def __getitem__( self :Union[str, Any] , lowerCamelCase__ :List[Any] ):
return self.features[i]
def __a ( self :int ):
return self.label_list
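# --- Added illustration (not part of the original file) ---
# The cache logic above relies on filelock.FileLock: in distributed training
# only the first process builds the features file while the others block on the
# lock and then read the cached copy. A minimal, self-contained sketch of the
# same pattern (the helper name is hypothetical):
def _load_or_build_cache(cache_path, build_fn):
    import os
    import torch
    from filelock import FileLock
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        data = build_fn()
        torch.save(data, cache_path)
        return data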
def nor_gate(input_a: int, input_b: int) -> int:
    return int(input_a == input_b == 0)
def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_aaa_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }
def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_aaa_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ):
UpperCamelCase__ :List[Any] = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :List[str] = image_size
UpperCamelCase__ :Dict = min_resolution
UpperCamelCase__ :List[str] = max_resolution
UpperCamelCase__ :str = do_resize
UpperCamelCase__ :int = size_divisor
UpperCamelCase__ :Optional[int] = do_rescale
def __a ( self :str ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None
def __a ( self :Dict ):
UpperCamelCase__ :Dict = GLPNImageProcessingTester(self )
@property
def __a ( self :List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) )
def __a ( self :Optional[int] ):
pass
def __a ( self :Tuple ):
# Initialize image_processing
UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :str ):
# Initialize image_processing
UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :Any ):
# Initialize image_processing
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :List[Any] ):
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :Any = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :str = generator.manual_seed(0 )
UpperCamelCase__ :str = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = """cyberpunk 2077"""
UpperCamelCase__ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :str = torch.manual_seed(0 )
UpperCamelCase__ :Dict = pipe.dual_guided(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """
UpperCamelCase__ :List[str] = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.text_to_image(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :Optional[int] = pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images
UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
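# --- Added note (not in the original file) ---
# These tests exercise three entry points of the pipeline: dual_guided (joint
# text + image guidance), text_to_image, and image_variation. Seeded generators
# keep the hard-coded slice comparisons reproducible across runs.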
import math
def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("This should never happen")
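# Added worked check (not in the original script): res(2, 10) = 10 * log10(2)
# ~= 3.0103 while res(10, 3) = 3.0, matching 2**10 = 1024 > 10**3 = 1000.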
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase = "Enter the base and the power separated by a comma: "
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase = res(xa, ya)
UpperCamelCase = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
def is_palindrome_number(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
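# Added usage check (not in the original script): 121 reads the same in both
# directions, negative numbers never do, and 123 reverses to 321.
assert is_palindrome_number(121)
assert not is_palindrome_number(-121)
assert not is_palindrome_number(123)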
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :int = 13
UpperCamelCase__ :Optional[int] = 7
UpperCamelCase__ :Dict = True
UpperCamelCase__ :Dict = True
UpperCamelCase__ :str = True
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :Any = True
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :List[str] = 99
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Any = 32
UpperCamelCase__ :List[str] = 2
UpperCamelCase__ :int = 4
UpperCamelCase__ :List[str] = 0.1
UpperCamelCase__ :Union[str, Any] = 0.1
UpperCamelCase__ :Union[str, Any] = 5_12
UpperCamelCase__ :List[str] = 16
UpperCamelCase__ :str = 2
UpperCamelCase__ :Optional[int] = 0.02
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :Optional[int] = 4
UpperCamelCase__ :Optional[int] = """last"""
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :int = None
UpperCamelCase__ :Dict = 0
def __a ( self :int ):
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCamelCase__ :Union[str, Any] = None
if self.use_input_lengths:
UpperCamelCase__ :Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ :List[str] = None
if self.use_token_type_ids:
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase__ :int = None
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :List[str] = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ :List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ):
UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask]
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ):
UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ):
UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ )
UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ):
UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ )
UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ):
UpperCamelCase__ :Any = self.num_labels
UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = self.num_choices
UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :int = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self :Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_snake_case : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : Tuple = False
def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = TFFlaubertModelTester(self )
UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 )
def __a ( self :int ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :str ):
UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCamelCase__ :Optional[int] = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0]
UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , lowerCamelCase__ )
# compare the actual values for a slice.
UpperCamelCase__ :str = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
UpperCamelCase = TypeVar("T")
class lowerCAmelCase_ ( Generic[T] ):
"""simple docstring"""
def __init__( self :List[str] , lowerCamelCase__ :bool = True ):
UpperCamelCase__ :dict[T, list[T]] = {} # dictionary of lists
UpperCamelCase__ :Optional[int] = directed
def __a ( self :Dict , lowerCamelCase__ :T , lowerCamelCase__ :T ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase__ )
self.adj_list[destination_vertex].append(lowerCamelCase__ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase__ )
UpperCamelCase__ :Tuple = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowerCamelCase__ )
UpperCamelCase__ :Any = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
UpperCamelCase__ :List[str] = [destination_vertex]
UpperCamelCase__ :Tuple = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase__ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
UpperCamelCase__ :Any = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
UpperCamelCase__ :Optional[Any] = [destination_vertex]
UpperCamelCase__ :Any = []
return self
def __repr__( self :Dict ):
return pformat(self.adj_list )
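# --- Added usage sketch (not in the original file; the readable class and
# method names above were reconstructed, so treat them as illustrative) ---
graph = GraphAdjacencyList[int](directed=False)
graph.add_edge(0, 1).add_edge(1, 2)  # add_edge returns self, so calls chain
assert repr(graph) == "{0: [1], 1: [0, 2], 2: [1]}"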
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
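# --- Added worked example (not in the original file) ---
# For a row of length 5 there are 7 red (length-2), 3 green (length-3) and
# 2 blue (length-4) tilings, so solution(5) == 12; this matches the tiling
# count of Project Euler problem 116.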
if __name__ == "__main__":
print(f'''{solution() = }''')
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ):
UpperCamelCase__ :Any = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :Union[str, Any] = patch_size
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :int = use_token_type_ids
UpperCamelCase__ :int = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :List[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :Union[str, Any] = type_sequence_label_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Tuple = shape_size
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :str = num_choices
UpperCamelCase__ :Tuple = scope
UpperCamelCase__ :str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase__ :List[str] = text_seq_length
UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1
UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length
def __a ( self :Tuple ):
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase__ :str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase__ :List[str] = bbox[i, j, 3]
UpperCamelCase__ :Optional[int] = bbox[i, j, 1]
UpperCamelCase__ :Optional[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase__ :Tuple = bbox[i, j, 2]
UpperCamelCase__ :Optional[Any] = bbox[i, j, 0]
UpperCamelCase__ :List[str] = tmp_coordinate
UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Any = None
if self.use_input_mask:
UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase__ :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase__ :Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ):
UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ )
# text + image
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , )
UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ):
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ )
UpperCamelCase__ :List[str] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = self.num_labels
UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Dict = 2
UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
UpperCamelCase__ :int = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : List[str] = False
_snake_case : Tuple = False
def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ):
return True
def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ):
UpperCamelCase__ :List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = {
k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Tuple = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Any ):
self.config_tester.run_common_tests()
def __a ( self :Optional[int] ):
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ )
if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0]
]
UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase__ :Optional[Any] = -1_00
UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters
UpperCamelCase__ :str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase__ :Any = {0: """input_ids"""}
for label_key in label_keys:
UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = label_key
UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase__ :Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase__ :List[str] = prepared_for_class[value]
UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ )
# Send to model
UpperCamelCase__ :str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __a ( self :Optional[int] ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
def __a ( self :Any ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
def __a ( self :Tuple ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels )
def __a ( self :Optional[int] ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels )
def __a ( self :List[str] ):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels )
@slow
def __a ( self :Optional[int] ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Optional[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def __a ( self :Dict ):
UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
UpperCamelCase__ :List[Any] = self.default_image_processor
UpperCamelCase__ :str = prepare_img()
UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values
UpperCamelCase__ :str = tf.constant([[1, 2]] )
UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :int = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
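# --- Added note (not part of the original test) ---
# The bbox tensor above is a toy example; in real usage LayoutLMv3 expects one
# (x0, y0, x1, y1) box per token with coordinates normalised to the 0-1000
# range, which is what the image processor's OCR path is meant to produce.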
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
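# Added usage check (not in the original script): two 4-ohm resistors behave
# like 2 ohms in parallel and 8 ohms in series.
assert resistor_parallel([4.0, 4.0]) == 2.0
assert resistor_series([4.0, 4.0]) == 8.0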
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The column name of the images in the files."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} )
_snake_case : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
    def __post_init__( self :List[str] ):
UpperCamelCase__ :Optional[Any] = {}
if self.train_dir is not None:
UpperCamelCase__ :int = self.train_dir
if self.validation_dir is not None:
UpperCamelCase__ :List[str] = self.validation_dir
UpperCamelCase__ :Optional[int] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = field(
default=lowercase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} )
_snake_case : bool = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_snake_case : float = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : float = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def A ( lowercase__ : Union[str, Any] ) -> Dict:
UpperCamelCase__ :Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def main ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ :List[str] = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCamelCase__ :Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ :List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCamelCase__ :Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCamelCase__ :Union[str, Any] = split["""train"""]
UpperCamelCase__ :Any = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :Optional[int] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Optional[Any] = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Tuple = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ )
if training_args.do_train:
UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names
else:
UpperCamelCase__ :Union[str, Any] = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCamelCase__ :Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase__ :Optional[Any] = """image"""
elif "img" in column_names:
UpperCamelCase__ :List[str] = """img"""
else:
UpperCamelCase__ :List[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""])
    transforms = Compose(
[
            Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples : Tuple ):
        examples["""pixel_values"""] = [transforms(image ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCamelCase__ :Optional[Any] = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Compute absolute learning rate
UpperCamelCase__ :Tuple = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256
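    # Worked example of the scaling rule above (illustrative numbers, not
    # defaults of this script): with a per-device batch of 32, gradient
    # accumulation of 2 and 4 processes, total_train_batch_size = 32 * 2 * 4
    # = 256, so the absolute rate equals base_learning_rate exactly; a total
    # batch of 512 would double it.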
# Initialize our trainer
UpperCamelCase__ :Union[str, Any] = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
UpperCamelCase__ :Any = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ :int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ :Dict = last_checkpoint
UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ :int = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase__ )
trainer.save_metrics("""eval""" , lowercase__ )
# Write model card and (optionally) push to hub
UpperCamelCase__ :Optional[int] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def _mp_fn ( index : Union[str, Any] ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 45 | 1 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term ( a_i : List[Any] , k : Tuple , i : str , n : Optional[Any] ) -> Any:
UpperCamelCase__ :List[Any] = sum(a_i[j] for j in range(lowercase__ , len(lowercase__ ) ) )
UpperCamelCase__ :str = sum(a_i[j] * base[j] for j in range(min(len(lowercase__ ) , lowercase__ ) ) )
UpperCamelCase__ , UpperCamelCase__ :List[str] = 0, 0
UpperCamelCase__ :Union[str, Any] = n - i
UpperCamelCase__ :str = memo.get(lowercase__ )
if sub_memo is not None:
UpperCamelCase__ :Tuple = sub_memo.get(lowercase__ )
if jumps is not None and len(lowercase__ ) > 0:
# find and make the largest jump without going over
UpperCamelCase__ :List[Any] = -1
for _k in range(len(lowercase__ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
UpperCamelCase__ :str = _k
break
if max_jump >= 0:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = jumps[max_jump]
# since the difference between jumps is cached, add c
UpperCamelCase__ :Tuple = diff + c
for j in range(min(lowercase__ , len(lowercase__ ) ) ):
UpperCamelCase__ , UpperCamelCase__ :Any = divmod(lowercase__ , 10 )
if new_c > 0:
add(lowercase__ , lowercase__ , lowercase__ )
else:
UpperCamelCase__ :Dict = []
else:
UpperCamelCase__ :int = {c: []}
UpperCamelCase__ :int = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
UpperCamelCase__ , UpperCamelCase__ :Dict = next_term(lowercase__ , k - 1 , i + dn , lowercase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
UpperCamelCase__ , UpperCamelCase__ :Dict = compute(lowercase__ , lowercase__ , i + dn , lowercase__ )
diff += _diff
dn += terms_jumped
UpperCamelCase__ :Union[str, Any] = sub_memo[c]
# keep jumps sorted by # of terms skipped
UpperCamelCase__ :Optional[Any] = 0
while j < len(lowercase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowercase__ , (diff, dn, k) )
return (diff, dn)
def compute ( a_i : List[str] , k : Optional[int] , i : Tuple , n : int ) -> Tuple:
if i >= n:
return 0, i
if k > len(lowercase__ ):
a_i.extend([0 for _ in range(k - len(lowercase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
UpperCamelCase__ :Optional[Any] = i
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[int] = 0, 0, 0
for j in range(len(lowercase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
UpperCamelCase__ :Any = ds_c + ds_b
diff += addend
UpperCamelCase__ :int = 0
for j in range(lowercase__ ):
UpperCamelCase__ :Union[str, Any] = a_i[j] + addend
UpperCamelCase__ , UpperCamelCase__ :str = divmod(lowercase__ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowercase__ , lowercase__ , lowercase__ )
return diff, i - start_i
def add ( digits : Optional[Any] , k : Tuple , addend : List[str] ) -> Any:
for j in range(lowercase__ , len(lowercase__ ) ):
UpperCamelCase__ :Union[str, Any] = digits[j] + addend
if s >= 10:
UpperCamelCase__ , UpperCamelCase__ :int = divmod(lowercase__ , 10 )
UpperCamelCase__ :Optional[int] = addend // 10 + quotient
else:
UpperCamelCase__ :Union[str, Any] = s
UpperCamelCase__ :Dict = addend // 10
if addend == 0:
break
while addend > 0:
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = divmod(lowercase__ , 10 )
digits.append(lowercase__ )
def solution ( n : int = 10**15 ) -> int:
UpperCamelCase__ :Any = [1]
UpperCamelCase__ :Any = 1
UpperCamelCase__ :Dict = 0
while True:
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = next_term(lowercase__ , 20 , i + dn , lowercase__ )
dn += terms_jumped
if dn == n - i:
break
UpperCamelCase__ :Tuple = 0
for j in range(len(lowercase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
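    # Cross-check sketch (assumed semantics: the sequence starts at a(1) = 1
    # with a(n + 1) = a(n) + digit_sum(a(n)), and solution(n) returns a(n)).
    # A direct simulation of the recurrence for small n:
    def _brute(n: int) -> int:
        a = 1
        for _ in range(n - 1):
            a += sum(int(d) for d in str(a))
        return a
    print(f'''{_brute(10) = }''')  # expected to agree with solution(10)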
| 45 |
from __future__ import annotations
def prime_sieve( limit : int ) -> list[int]:
    # Sieve of Eratosthenes over the odd numbers; 2 is added separately.
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution( ceiling : int = 100_0000 ) -> int:
    # Find the prime below the ceiling that is the sum of the most consecutive primes.
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
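    # Small illustrative run (example input, not part of the original file):
    print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]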
| 45 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = StableDiffusionSAGPipeline
_snake_case : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
_snake_case : int = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
_snake_case : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
_snake_case : Union[str, Any] = False
def __a ( self :List[Any] ):
torch.manual_seed(0 )
UpperCamelCase__ :Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
UpperCamelCase__ :Any = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
torch.manual_seed(0 )
UpperCamelCase__ :int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ :str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
UpperCamelCase__ :int = CLIPTextModel(lowerCamelCase__ )
UpperCamelCase__ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCamelCase__ :Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __a ( self :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any]=0 ):
if str(lowerCamelCase__ ).startswith("""mps""" ):
UpperCamelCase__ :Dict = torch.manual_seed(lowerCamelCase__ )
else:
UpperCamelCase__ :int = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
UpperCamelCase__ :Any = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def __a ( self :Optional[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :int ):
UpperCamelCase__ :int = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
UpperCamelCase__ :Optional[int] = sag_pipe.to(lowerCamelCase__ )
sag_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :str = """."""
UpperCamelCase__ :Dict = torch.manual_seed(0 )
UpperCamelCase__ :str = sag_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
UpperCamelCase__ :Optional[int] = output.images
UpperCamelCase__ :Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Optional[Any] = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __a ( self :Dict ):
UpperCamelCase__ :Tuple = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
UpperCamelCase__ :Dict = sag_pipe.to(lowerCamelCase__ )
sag_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Dict = """."""
UpperCamelCase__ :List[str] = torch.manual_seed(0 )
UpperCamelCase__ :List[Any] = sag_pipe(
[prompt] , generator=lowerCamelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
UpperCamelCase__ :List[str] = output.images
UpperCamelCase__ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :List[str] = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __a ( self :str ):
UpperCamelCase__ :str = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
UpperCamelCase__ :Tuple = sag_pipe.to(lowerCamelCase__ )
sag_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :str = """."""
UpperCamelCase__ :int = torch.manual_seed(0 )
UpperCamelCase__ :int = sag_pipe(
[prompt] , width=7_68 , height=5_12 , generator=lowerCamelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
UpperCamelCase__ :str = output.images
assert image.shape == (1, 5_12, 7_68, 3)
| 45 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ):
UpperCamelCase__ :Optional[Any] = parent
UpperCamelCase__ :Dict = batch_size
UpperCamelCase__ :Tuple = seq_length
UpperCamelCase__ :Dict = is_training
UpperCamelCase__ :List[str] = use_input_mask
UpperCamelCase__ :Optional[Any] = use_token_type_ids
UpperCamelCase__ :Tuple = use_labels
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :Optional[Any] = num_hidden_layers
UpperCamelCase__ :int = num_attention_heads
UpperCamelCase__ :Optional[int] = intermediate_multiple_size
UpperCamelCase__ :Optional[Any] = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout
UpperCamelCase__ :List[Any] = attention_dropout
UpperCamelCase__ :List[str] = weight_tying
UpperCamelCase__ :List[str] = max_position_embeddings
UpperCamelCase__ :Dict = type_vocab_size
UpperCamelCase__ :List[Any] = type_sequence_label_size
UpperCamelCase__ :List[str] = initializer_range
UpperCamelCase__ :int = num_labels
UpperCamelCase__ :Dict = num_choices
UpperCamelCase__ :Any = scope
def __a ( self :Any ):
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :str = None
if self.use_input_mask:
UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __a ( self :Union[str, Any] ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs()
UpperCamelCase__ :Optional[int] = True
return config, input_ids, input_mask, token_labels
def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ):
UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# first forward pass
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ :Union[str, Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def __a ( self :Tuple ):
UpperCamelCase__ :int = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_snake_case : str = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_snake_case : Union[str, Any] = False
_snake_case : Dict = False
_snake_case : List[str] = False
_snake_case : Optional[int] = False
def __a ( self :List[Any] ):
UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self )
UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Dict ):
self.config_tester.run_common_tests()
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ :Dict = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def __a ( self :int ):
UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b"""
UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
UpperCamelCase__ :Union[str, Any] = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = []
for prompt in prompts:
UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids
UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 )
UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
| 45 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
UpperCamelCase = "bert-base-cased"
UpperCamelCase = "fp16"
UpperCamelCase = "bf16"
UpperCamelCase = [FPaa, BFaa]
@require_fsdp
@require_cuda
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __a ( self :List[str] ):
super().setUp()
UpperCamelCase__ :str = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def __a ( self :Union[str, Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = self.dist_env.copy()
UpperCamelCase__ :List[Any] = f"""{i + 1}"""
UpperCamelCase__ :List[Any] = strategy
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :Tuple = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def __a ( self :Union[str, Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = self.dist_env.copy()
UpperCamelCase__ :Optional[int] = prefetch_policy
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :Dict = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def __a ( self :Optional[Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = self.dist_env.copy()
UpperCamelCase__ :Tuple = state_dict_type
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :List[str] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = AutoModel.from_pretrained(lowerCamelCase__ )
for policy in FSDP_AUTO_WRAP_POLICY:
UpperCamelCase__ :Optional[int] = self.dist_env.copy()
UpperCamelCase__ :int = policy
if policy == "TRANSFORMER_BASED_WRAP":
UpperCamelCase__ :Optional[Any] = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
UpperCamelCase__ :Union[str, Any] = """2000"""
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :int = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
UpperCamelCase__ :Optional[int] = self.dist_env.copy()
UpperCamelCase__ :str = """TRANSFORMER_BASED_WRAP"""
UpperCamelCase__ :Union[str, Any] = """T5Layer"""
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :Any = FullyShardedDataParallelPlugin()
with self.assertRaises(lowerCamelCase__ ) as cm:
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
UpperCamelCase__ :Dict = self.dist_env.copy()
UpperCamelCase__ :int = """SIZE_BASED_WRAP"""
UpperCamelCase__ :Union[str, Any] = """0"""
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __a ( self :Optional[Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
UpperCamelCase__ :Dict = self.dist_env.copy()
UpperCamelCase__ :Dict = mp_dtype
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = Accelerator()
if mp_dtype == "fp16":
UpperCamelCase__ :Tuple = torch.floataa
elif mp_dtype == "bf16":
UpperCamelCase__ :Tuple = torch.bfloataa
UpperCamelCase__ :int = MixedPrecision(param_dtype=lowerCamelCase__ , reduce_dtype=lowerCamelCase__ , buffer_dtype=lowerCamelCase__ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , lowerCamelCase__ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , lowerCamelCase__ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(lowerCamelCase__ )
def __a ( self :Optional[Any] ):
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
UpperCamelCase__ :List[str] = self.dist_env.copy()
UpperCamelCase__ :Dict = str(lowerCamelCase__ ).lower()
with mockenv_context(**lowerCamelCase__ ):
UpperCamelCase__ :List[str] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=lowerCamelCase__ ) )
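# Minimal direct-construction sketch (assumed usage, mirroring what the
# mocked-environment tests above verify through env vars): the plugin can also
# be built explicitly and handed to the Accelerator, e.g.
#
#   from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
#   fsdp_plugin = FullyShardedDataParallelPlugin(cpu_offload=CPUOffload(offload_params=True))
#   accelerator = Accelerator(fsdp_plugin=fsdp_plugin)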
@require_fsdp
@require_multi_gpu
@slow
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __a ( self :Dict ):
super().setUp()
UpperCamelCase__ :str = 0.82
UpperCamelCase__ :int = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
UpperCamelCase__ :int = {
"""multi_gpu_fp16""": 32_00,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 20_00,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 19_00,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
UpperCamelCase__ :Optional[Any] = 1_60
UpperCamelCase__ :List[str] = 1_60
UpperCamelCase__ :Union[str, Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase__ :Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def __a ( self :str ):
UpperCamelCase__ :int = os.path.join(self.test_scripts_folder , """test_performance.py""" )
UpperCamelCase__ :List[str] = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
UpperCamelCase__ :Optional[Any] = cmd.copy()
for i, strategy in enumerate(lowerCamelCase__ ):
if strategy.lower() in config:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
def __a ( self :str ):
UpperCamelCase__ :List[Any] = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
UpperCamelCase__ :Any = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = cmd.copy()
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
UpperCamelCase__ :Optional[int] = len(lowerCamelCase__ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
UpperCamelCase__ :Tuple = cmd_config[:state_dict_config_index]
cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
UpperCamelCase__ :List[Any] = cmd_config[:-1]
UpperCamelCase__ :Tuple = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
UpperCamelCase__ :Optional[int] = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
UpperCamelCase__ :Optional[int] = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(lowerCamelCase__ ):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
| 45 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data : dict ) -> tuple:
    return (data["data"], data["target"])
def xgboost( features : np.ndarray , target : np.ndarray ) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main( ) -> None:
    iris = load_iris()
    features , targets = data_handling(iris )
    x_train , x_test , y_train , y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["""target_names"""]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="""Blues""" , normalize="""true""" , )
    plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
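    # Illustrative follow-up (example values, not in the original script):
    # inside main(), the trained classifier could also label a single sample,
    # e.g. xgboost(x_train, y_train).predict([[5.1, 3.5, 1.4, 0.2]]) returns
    # the predicted class index for those four iris measurements.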
| 45 | 1 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester :
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Dict=13 , lowerCamelCase__ :Tuple=30 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :Tuple=3 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :List[str]=True , lowerCamelCase__ :Any=32 , lowerCamelCase__ :Optional[Any]=5 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :List[Any]=37 , lowerCamelCase__ :Any="gelu" , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :Tuple=0.1 , lowerCamelCase__ :Optional[int]=10 , lowerCamelCase__ :Tuple=0.02 , lowerCamelCase__ :Optional[Any]=None , lowerCamelCase__ :Optional[Any]=2 , ):
UpperCamelCase__ :Optional[Any] = parent
UpperCamelCase__ :Dict = batch_size
UpperCamelCase__ :Tuple = image_size
UpperCamelCase__ :List[Any] = patch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :Tuple = is_training
UpperCamelCase__ :List[str] = use_labels
UpperCamelCase__ :str = hidden_size
UpperCamelCase__ :Dict = num_hidden_layers
UpperCamelCase__ :Any = num_attention_heads
UpperCamelCase__ :str = intermediate_size
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Optional[Any] = attention_probs_dropout_prob
UpperCamelCase__ :Any = type_sequence_label_size
UpperCamelCase__ :Tuple = initializer_range
UpperCamelCase__ :Optional[Any] = scope
UpperCamelCase__ :Optional[int] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ :Any = (image_size // patch_size) ** 2
UpperCamelCase__ :Optional[Any] = num_patches + 1
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Dict = self.get_config()
return config, pixel_values, labels
def __a ( self :Tuple ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __a ( self :str , lowerCamelCase__ :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Any ):
UpperCamelCase__ :Any = ViTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Union[str, Any] = ViTForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Dict = model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase__ :Any = 1
UpperCamelCase__ :Optional[int] = ViTForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __a ( self :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , lowerCamelCase__ :Dict ):
UpperCamelCase__ :Optional[Any] = self.type_sequence_label_size
UpperCamelCase__ :Union[str, Any] = ViTForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ :Tuple = 1
UpperCamelCase__ :List[str] = ViTForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ :Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Any = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Any = config_and_inputs
UpperCamelCase__ :Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_snake_case : str = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
_snake_case : Optional[int] = True
_snake_case : Optional[int] = False
_snake_case : Tuple = False
_snake_case : Tuple = False
def __a ( self :str ):
UpperCamelCase__ :int = ViTModelTester(self )
UpperCamelCase__ :Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def __a ( self :str ):
pass
def __a ( self :List[Any] ):
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :List[str] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__ :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def __a ( self :Tuple ):
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :List[Any] = model_class(lowerCamelCase__ )
UpperCamelCase__ :str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ :Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase__ :Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :int = ViTModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img ( ) -> Dict:
UpperCamelCase__ :List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor ( self :List[Any] ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def __a ( self :str ):
UpperCamelCase__ :int = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(lowerCamelCase__ )
UpperCamelCase__ :Dict = self.default_image_processor
UpperCamelCase__ :Tuple = prepare_img()
UpperCamelCase__ :Optional[Any] = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase__ :Dict = model(**lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :str = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCamelCase__ :Any = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
@slow
def __a ( self :Tuple ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
UpperCamelCase__ :Any = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(lowerCamelCase__ )
UpperCamelCase__ :Any = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=4_80 )
UpperCamelCase__ :Optional[Any] = prepare_img()
UpperCamelCase__ :int = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" )
UpperCamelCase__ :Union[str, Any] = inputs.pixel_values.to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase__ :Dict = model(lowerCamelCase__ , interpolate_pos_encoding=lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :Union[str, Any] = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
UpperCamelCase__ :str = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __a ( self :str ):
UpperCamelCase__ :Dict = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" )
UpperCamelCase__ :Union[str, Any] = self.default_image_processor
UpperCamelCase__ :Any = prepare_img()
UpperCamelCase__ :List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" )
UpperCamelCase__ :Dict = inputs.pixel_values.to(lowerCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )
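# The `interpolate_pos_encoding` test in the class above resizes the pre-trained
# position embeddings to a new patch grid. A minimal standalone sketch of that
# idea (hypothetical helper, not the library's internal implementation):
import torch
import torch.nn.functional as F
def interpolate_patch_pos_embed(pos_embed, new_grid):
    # pos_embed: (1, 1 + old_grid**2, dim), with a leading [CLS] embedding
    cls_tok, patch_tok = pos_embed[:, :1], pos_embed[:, 1:]
    old_grid = int(patch_tok.shape[1] ** 0.5)
    dim = patch_tok.shape[-1]
    grid = patch_tok.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
    grid = F.interpolate(grid, size=(new_grid, new_grid), mode="bicubic", align_corners=False)
    patch_tok = grid.permute(0, 2, 3, 1).reshape(1, new_grid * new_grid, dim)
    return torch.cat([cls_tok, patch_tok], dim=1)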
| 45 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A ( lowercase__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase__ :Union[str, Any] = {}
UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""]
UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
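# A quick illustration of the chars-per-token ratio computed in the mapping
# function above ("gpt2" is only a stand-in checkpoint for this sketch):
# tok = AutoTokenizer.from_pretrained("gpt2")
# text = "def add(a, b):\n    return a + b\n"
# print(len(text) / len(tok(text)["input_ids"]))  # higher ratio = better compression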
| 45 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Tuple = """sew-d"""
def __init__( self :Optional[Any] , lowerCamelCase__ :int=32 , lowerCamelCase__ :Optional[Any]=7_68 , lowerCamelCase__ :Optional[Any]=12 , lowerCamelCase__ :Dict=12 , lowerCamelCase__ :Any=30_72 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Tuple=5_12 , lowerCamelCase__ :Optional[Any]=2_56 , lowerCamelCase__ :int=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :int=("p2c", "c2p") , lowerCamelCase__ :List[str]="layer_norm" , lowerCamelCase__ :int="gelu_python" , lowerCamelCase__ :Tuple=0.1 , lowerCamelCase__ :List[str]=0.1 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :int=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :List[Any]=0.02 , lowerCamelCase__ :str=1e-7 , lowerCamelCase__ :List[Any]=1e-5 , lowerCamelCase__ :Tuple="group" , lowerCamelCase__ :List[Any]="gelu" , lowerCamelCase__ :List[Any]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , lowerCamelCase__ :Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase__ :Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase__ :Union[str, Any]=False , lowerCamelCase__ :Any=1_28 , lowerCamelCase__ :Any=16 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=0.05 , lowerCamelCase__ :List[Any]=10 , lowerCamelCase__ :Union[str, Any]=2 , lowerCamelCase__ :int=0.0 , lowerCamelCase__ :str=10 , lowerCamelCase__ :Any=0 , lowerCamelCase__ :str="mean" , lowerCamelCase__ :Union[str, Any]=False , lowerCamelCase__ :Optional[Any]=False , lowerCamelCase__ :Dict=2_56 , lowerCamelCase__ :str=0 , lowerCamelCase__ :Union[str, Any]=1 , lowerCamelCase__ :Any=2 , **lowerCamelCase__ :str , ):
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = hidden_size
UpperCamelCase__ :Optional[Any] = feat_extract_norm
UpperCamelCase__ :Any = feat_extract_activation
UpperCamelCase__ :Any = list(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = list(lowerCamelCase__ )
UpperCamelCase__ :Any = list(lowerCamelCase__ )
UpperCamelCase__ :str = conv_bias
UpperCamelCase__ :Any = num_conv_pos_embeddings
UpperCamelCase__ :List[str] = num_conv_pos_embedding_groups
UpperCamelCase__ :List[Any] = len(self.conv_dim )
UpperCamelCase__ :Union[str, Any] = num_hidden_layers
UpperCamelCase__ :Dict = intermediate_size
UpperCamelCase__ :str = squeeze_factor
UpperCamelCase__ :Optional[Any] = max_position_embeddings
UpperCamelCase__ :Tuple = position_buckets
UpperCamelCase__ :int = share_att_key
UpperCamelCase__ :str = relative_attention
UpperCamelCase__ :List[str] = norm_rel_ebd
UpperCamelCase__ :Any = list(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = hidden_act
UpperCamelCase__ :List[Any] = num_attention_heads
UpperCamelCase__ :Tuple = hidden_dropout
UpperCamelCase__ :int = attention_dropout
UpperCamelCase__ :List[str] = activation_dropout
UpperCamelCase__ :Optional[int] = feat_proj_dropout
UpperCamelCase__ :int = final_dropout
UpperCamelCase__ :Optional[int] = layer_norm_eps
UpperCamelCase__ :str = feature_layer_norm_eps
UpperCamelCase__ :Union[str, Any] = initializer_range
UpperCamelCase__ :int = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ :int = apply_spec_augment
UpperCamelCase__ :int = mask_time_prob
UpperCamelCase__ :List[Any] = mask_time_length
UpperCamelCase__ :List[str] = mask_time_min_masks
UpperCamelCase__ :int = mask_feature_prob
UpperCamelCase__ :Optional[int] = mask_feature_length
UpperCamelCase__ :str = mask_feature_min_masks
# ctc loss
UpperCamelCase__ :str = ctc_loss_reduction
UpperCamelCase__ :Optional[int] = ctc_zero_infinity
# sequence classification
UpperCamelCase__ :str = use_weighted_layer_sum
UpperCamelCase__ :int = classifier_proj_size
@property
def __a ( self :List[Any] ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 45 |
def A ( lowercase__ : int ) -> Optional[Any]:
stooge(lowercase__ , 0 , len(lowercase__ ) - 1 )
return arr
def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : str ) -> List[str]:
if i >= h:
return
# If the first element is larger than the last, swap them
if arr[i] > arr[h]:
UpperCamelCase__ , UpperCamelCase__ :List[str] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
UpperCamelCase__ :Optional[int] = int((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowercase__ , i + t , (lowercase__) )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
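# A hedged sanity check (assuming the entry point is exposed as `stooge_sort`,
# as the __main__ block above expects): the result must agree with sorted().
# import random
# sample = random.sample(range(100), 20)
# assert stooge_sort(list(sample)) == sorted(sample)
# Runtime note: the recurrence T(n) = 3*T(2n/3) + O(1) solves to
# O(n ** (log 3 / log 1.5)) ~= O(n**2.71), so stooge sort is a teaching tool only.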
| 45 | 1 |
from __future__ import annotations
def A ( lowercase__ : int | float | str , lowercase__ : int | float | str ) -> list[str]:
if nth_term == "":
return [""]
UpperCamelCase__ :Dict = int(lowercase__ )
UpperCamelCase__ :Union[str, Any] = int(lowercase__ )
UpperCamelCase__ :list[str] = []
for temp in range(int(lowercase__ ) ):
series.append(f"""1 / {pow(temp + 1 , int(lowercase__ ) )}""" if series else """1""" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = int(input("Enter the last number (nth term) of the P-Series"))
UpperCamelCase = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 45 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_tf_ops.py
UpperCamelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCamelCase = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]:
UpperCamelCase__ :str = SavedModel()
UpperCamelCase__ :List[str] = []
with open(os.path.join(lowercase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
UpperCamelCase__ :str = json.load(lowercase__ )["""opsets"""]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(lowercase__ )] )
with open(lowercase__ , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
UpperCamelCase__ :Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to a sorted list so the output is deterministic
UpperCamelCase__ :Union[str, Any] = sorted(lowercase__ )
UpperCamelCase__ :List[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowercase__ )
if strict and len(lowercase__ ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(lowercase__ ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*lowercase__ , sep="""\n""" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
UpperCamelCase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
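# Example invocation from the repo root (script name and .pb path are
# placeholders), using the flags defined above:
#   python check_tf_ops.py --saved_model_path path/to/saved_model.pb --opset 12 --strict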
| 45 | 1 |
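# The dynamic programme below counts row tilings: for each row length and each
# block length >= 3 it sums the fillings of whatever remains after the block
# plus its mandatory one-unit separator (hence the `- 1`), and the trailing
# `+= 1` counts the block placed flush against the right edge; the initial 1
# per entry is the empty (all-black) row. A hedged reading: this is the
# counting scheme of Project Euler problem 114.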
def A ( lowercase__ : int = 50 ) -> int:
UpperCamelCase__ :Optional[int] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45 |
from __future__ import annotations
def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]:
UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the English language (how often each appears)
UpperCamelCase__ :Optional[Any] = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
UpperCamelCase__ :Optional[int] = frequencies_dict
if not case_sensitive:
UpperCamelCase__ :int = ciphertext.lower()
# Chi squared statistic values
UpperCamelCase__ :dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(lowercase__ ) ):
UpperCamelCase__ :int = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
UpperCamelCase__ :Optional[int] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
UpperCamelCase__ :Optional[int] = letter.lower()
if letter in frequencies:
# Get the number of times the letter occurs in the message
UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ )
# Get the expected number of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the number of times the letter occurs in the message
UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ )
# Get the expected number of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
UpperCamelCase__ :Union[str, Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
UpperCamelCase__ :int = min(
lowercase__ , key=lowercase__ , )
# Get all the data from the most likely cipher (key, decoded message)
UpperCamelCase__ , UpperCamelCase__ :Tuple = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
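# For reference, the statistic accumulated above is Pearson's chi-squared:
#   chi^2 = sum over letters of (observed - expected)^2 / expected
# A hedged usage sketch (upstream this function is called
# `decrypt_caesar_with_chi_squared`; here its name is obfuscated):
#   shift, chi2, plaintext = decrypt_caesar_with_chi_squared("dro aesmu lbygx pyh")
#   # expected best shift: 10, decoding to "the quick brown fox"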
| 45 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : int = """longformer"""
def __init__( self :List[str] , lowerCamelCase__ :Union[List[int], int] = 5_12 , lowerCamelCase__ :int = 2 , lowerCamelCase__ :int = 1 , lowerCamelCase__ :int = 0 , lowerCamelCase__ :int = 2 , lowerCamelCase__ :int = 3_05_22 , lowerCamelCase__ :int = 7_68 , lowerCamelCase__ :int = 12 , lowerCamelCase__ :int = 12 , lowerCamelCase__ :int = 30_72 , lowerCamelCase__ :str = "gelu" , lowerCamelCase__ :float = 0.1 , lowerCamelCase__ :float = 0.1 , lowerCamelCase__ :int = 5_12 , lowerCamelCase__ :int = 2 , lowerCamelCase__ :float = 0.02 , lowerCamelCase__ :float = 1e-12 , lowerCamelCase__ :bool = False , **lowerCamelCase__ :Tuple , ):
super().__init__(pad_token_id=lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ :List[Any] = attention_window
UpperCamelCase__ :Union[str, Any] = sep_token_id
UpperCamelCase__ :Tuple = bos_token_id
UpperCamelCase__ :Optional[Any] = eos_token_id
UpperCamelCase__ :Tuple = vocab_size
UpperCamelCase__ :str = hidden_size
UpperCamelCase__ :str = num_hidden_layers
UpperCamelCase__ :Dict = num_attention_heads
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :str = intermediate_size
UpperCamelCase__ :str = hidden_dropout_prob
UpperCamelCase__ :Any = attention_probs_dropout_prob
UpperCamelCase__ :List[str] = max_position_embeddings
UpperCamelCase__ :Any = type_vocab_size
UpperCamelCase__ :str = initializer_range
UpperCamelCase__ :Optional[Any] = layer_norm_eps
UpperCamelCase__ :Optional[Any] = onnx_export
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :"PretrainedConfig" , lowerCamelCase__ :str = "default" , lowerCamelCase__ :"List[PatchingSpec]" = None ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Any = True
@property
def __a ( self :Optional[Any] ):
if self.task == "multiple-choice":
UpperCamelCase__ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCamelCase__ :List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def __a ( self :List[str] ):
UpperCamelCase__ :int = super().outputs
if self.task == "default":
UpperCamelCase__ :Tuple = {0: """batch"""}
return outputs
@property
def __a ( self :List[Any] ):
return 1e-4
@property
def __a ( self :Tuple ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def __a ( self :List[Any] , lowerCamelCase__ :"PreTrainedTokenizerBase" , lowerCamelCase__ :int = -1 , lowerCamelCase__ :int = -1 , lowerCamelCase__ :bool = False , lowerCamelCase__ :Optional[TensorType] = None , ):
UpperCamelCase__ :Any = super().generate_dummy_inputs(
preprocessor=lowerCamelCase__ , batch_size=lowerCamelCase__ , seq_length=lowerCamelCase__ , is_pair=lowerCamelCase__ , framework=lowerCamelCase__ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
UpperCamelCase__ :Union[str, Any] = torch.zeros_like(inputs["""input_ids"""] )
# make every second token global
UpperCamelCase__ :Union[str, Any] = 1
return inputs
| 45 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ):
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 45 | 1 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
def A ( lowercase__ : type , lowercase__ : Optional[str] , lowercase__ : Optional[List[str]] = None , ) -> int:
UpperCamelCase__ :Union[str, Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" )
UpperCamelCase__ :Union[str, Any] = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" )
UpperCamelCase__ :List[Any] = format_type
def A ( lowercase__ : Exception , lowercase__ : Optional[str] , lowercase__ : Optional[List[str]] = None ) -> List[Any]:
UpperCamelCase__ :Any = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
UpperCamelCase__ :Optional[int] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
UpperCamelCase = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
UpperCamelCase = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
UpperCamelCase = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def A ( lowercase__ : Optional[str] ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def A ( lowercase__ : Optional[str] , **lowercase__ : Optional[Any] ) -> Formatter:
UpperCamelCase__ :Union[str, Any] = get_format_type_from_alias(lowercase__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**lowercase__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'""" )
| 45 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase = get_tests_dir("fixtures")
UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCamelCase = get_tests_dir("fixtures/dummy-config.json")
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[int] = 0
def __a ( self :str ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ :List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase__ :Tuple = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def __a ( self :List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def __a ( self :int ):
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __a ( self :Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def __a ( self :Dict ):
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self :Optional[int] ):
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[int] = True
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 45 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 45 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ):
super().__init__()
UpperCamelCase__ :Tuple = value_function
UpperCamelCase__ :Optional[int] = unet
UpperCamelCase__ :List[str] = scheduler
UpperCamelCase__ :Dict = env
UpperCamelCase__ :Dict = env.get_dataset()
UpperCamelCase__ :Union[str, Any] = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase__ :Any = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase__ :List[Any] = env.observation_space.shape[0]
UpperCamelCase__ :List[str] = env.action_space.shape[0]
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ):
return (x_in - self.means[key]) / self.stds[key]
def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
return x_in * self.stds[key] + self.means[key]
def __a ( self :Any , lowerCamelCase__ :int ):
if type(lowerCamelCase__ ) is dict:
return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
for key, val in cond.items():
UpperCamelCase__ :str = val.clone()
return x_in
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Any = x.shape[0]
UpperCamelCase__ :List[Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ )
UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance )
UpperCamelCase__ :Dict = model_std * grad
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Dict = x.detach()
UpperCamelCase__ :int = x + scale * grad
UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ )
return x, y
def __call__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ):
# normalize the observations and create batch dimension
UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" )
UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 )
UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )}
UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device )
UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
UpperCamelCase__ :Dict = x[sorted_idx]
UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim]
UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" )
# select the action with the highest value
if y is not None:
UpperCamelCase__ :List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ )
UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0]
return denorm_actions
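# The guidance loop in `run_diffusion` above is plain gradient ascent on a
# learned value function: x <- x + scale * std_t * grad_x V(x, t). A minimal
# standalone sketch of one guidance step (hypothetical `value_fn`, not tied to
# this pipeline):
import torch
def guidance_step(x, t, value_fn, scale):
    with torch.enable_grad():
        x = x.detach().requires_grad_()
        grad = torch.autograd.grad(value_fn(x, t).sum(), x)[0]
    return (x + scale * grad).detach()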
| 45 | 1 |
import random
from typing import Any
def A ( lowercase__ : list ) -> list[Any]:
for _ in range(len(lowercase__ ) ):
UpperCamelCase__ :Optional[int] = random.randint(0 , len(lowercase__ ) - 1 )
UpperCamelCase__ :Optional[int] = random.randint(0 , len(lowercase__ ) - 1 )
UpperCamelCase__ , UpperCamelCase__ :List[str] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCamelCase = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 45 |
def A ( lowercase__ : int ) -> bool:
if num < 0:
return False
UpperCamelCase__ :int = num
UpperCamelCase__ :int = 0
while num > 0:
UpperCamelCase__ :Optional[int] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCamelCase = "\\n Text data.\n Second line of data."
UpperCamelCase = "file"
@pytest.fixture(scope="""session""" )
def A ( lowercase__ : List[str] ) -> Union[str, Any]:
UpperCamelCase__ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
UpperCamelCase__ :Optional[Any] = bytes(lowercase__ , """utf-8""" )
with zstd.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture
def A ( lowercase__ : str ) -> int:
with open(os.path.join(tmpfs.local_root_dir , lowercase__ ) , """w""" ) as f:
f.write(lowercase__ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def A ( lowercase__ : Optional[Any] , lowercase__ : Dict , lowercase__ : int , lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
UpperCamelCase__ :Optional[int] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
UpperCamelCase__ :List[Any] = input_paths[compression_format]
UpperCamelCase__ :Tuple = tmp_path / """cache"""
UpperCamelCase__ :Dict = DownloadConfig(cache_dir=lowercase__ , extract_compressed_file=lowercase__ )
UpperCamelCase__ :int = cached_path(lowercase__ , download_config=lowercase__ )
with open(lowercase__ ) as f:
UpperCamelCase__ :int = f.read()
with open(lowercase__ ) as f:
UpperCamelCase__ :Tuple = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : Dict , lowercase__ : Any , lowercase__ : List[Any] ) -> List[str]:
UpperCamelCase__ :Dict = """custom_cache"""
UpperCamelCase__ :Union[str, Any] = """custom_extracted_dir"""
UpperCamelCase__ :Optional[int] = tmp_path / """custom_extracted_path"""
if default_extracted:
UpperCamelCase__ :Union[str, Any] = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , lowercase__ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(lowercase__ ) )
UpperCamelCase__ :Dict = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
UpperCamelCase__ :Optional[int] = xz_file
UpperCamelCase__ :Optional[Any] = (
DownloadConfig(extract_compressed_file=lowercase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowercase__ )
)
UpperCamelCase__ :str = cached_path(lowercase__ , download_config=lowercase__ )
assert Path(lowercase__ ).parent.parts[-2:] == expected
def A ( lowercase__ : List[str] ) -> Dict:
# absolute path
UpperCamelCase__ :Optional[Any] = str(Path(lowercase__ ).resolve() )
assert cached_path(lowercase__ ) == text_file
# relative path
UpperCamelCase__ :str = str(Path(lowercase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowercase__ ) == text_file
def A ( lowercase__ : Optional[Any] ) -> Tuple:
# absolute path
UpperCamelCase__ :Tuple = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(lowercase__ ):
cached_path(lowercase__ )
# relative path
UpperCamelCase__ :Tuple = """./__missing_file__.txt"""
with pytest.raises(lowercase__ ):
cached_path(lowercase__ )
def A ( lowercase__ : str ) -> Optional[int]:
UpperCamelCase__ :Any = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(lowercase__ ) as f:
UpperCamelCase__ :Tuple = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase__ )
def A ( ) -> Tuple:
with pytest.raises(lowercase__ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase__ )
def A ( lowercase__ : str ) -> Optional[int]:
UpperCamelCase__ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowercase__ ):
http_get("""https://huggingface.co""" , temp_file=lowercase__ )
with pytest.raises(lowercase__ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase__ )
def A ( lowercase__ : List[Any] ) -> int:
UpperCamelCase__ :List[str] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowercase__ ):
ftp_get("""ftp://huggingface.co""" , temp_file=lowercase__ )
with pytest.raises(lowercase__ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase__ )
def A ( lowercase__ : Optional[int] ) -> List[Any]:
UpperCamelCase__ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowercase__ ):
fsspec_get("""s3://huggingface.co""" , temp_file=lowercase__ )
with pytest.raises(lowercase__ ):
fsspec_head("""s3://huggingface.co""" )
| 45 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> bool:
return len(set(lowercase__ ) ) == len(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
UpperCamelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def A ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[Any] , lowercase__ : str , lowercase__ : List[str] ) -> int:
for attribute in key.split(""".""" ):
UpperCamelCase__ :Union[str, Any] = getattr(lowercase__ , lowercase__ )
if weight_type is not None:
UpperCamelCase__ :Optional[int] = getattr(lowercase__ , lowercase__ ).shape
else:
UpperCamelCase__ :Tuple = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCamelCase__ :Optional[Any] = value
elif weight_type == "weight_g":
UpperCamelCase__ :List[Any] = value
elif weight_type == "weight_v":
UpperCamelCase__ :Tuple = value
elif weight_type == "bias":
UpperCamelCase__ :int = value
else:
UpperCamelCase__ :Tuple = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def A ( lowercase__ : int , lowercase__ : Dict ) -> Dict:
UpperCamelCase__ :int = []
UpperCamelCase__ :Union[str, Any] = fairseq_model.state_dict()
UpperCamelCase__ :str = hf_model.feature_extractor
UpperCamelCase__ :int = hf_model.adapter
for name, value in fairseq_dict.items():
UpperCamelCase__ :Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == """group""" , )
UpperCamelCase__ :str = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
UpperCamelCase__ :Dict = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCamelCase__ :List[str] = True
if "*" in mapped_key:
UpperCamelCase__ :str = name.split(lowercase__ )[0].split(""".""" )[-2]
UpperCamelCase__ :Tuple = mapped_key.replace("""*""" , lowercase__ )
if "weight_g" in name:
UpperCamelCase__ :Tuple = """weight_g"""
elif "weight_v" in name:
UpperCamelCase__ :Union[str, Any] = """weight_v"""
elif "bias" in name:
UpperCamelCase__ :int = """bias"""
elif "weight" in name:
UpperCamelCase__ :int = """weight"""
else:
UpperCamelCase__ :Tuple = None
set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
continue
if not is_used:
unused_weights.append(lowercase__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def A ( lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : Optional[int] , lowercase__ : str , lowercase__ : int ) -> Optional[int]:
UpperCamelCase__ :Optional[Any] = full_name.split("""conv_layers.""" )[-1]
UpperCamelCase__ :Optional[Any] = name.split(""".""" )
UpperCamelCase__ :Tuple = int(items[0] )
UpperCamelCase__ :Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCamelCase__ :List[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCamelCase__ :List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCamelCase__ :Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCamelCase__ :Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase__ )
def A ( lowercase__ : int , lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : str ) -> Any:
UpperCamelCase__ :List[Any] = full_name.split("""adaptor.""" )[-1]
UpperCamelCase__ :Union[str, Any] = name.split(""".""" )
if items[1].isdigit():
UpperCamelCase__ :Optional[Any] = int(items[1] )
else:
UpperCamelCase__ :Dict = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
UpperCamelCase__ :Optional[int] = value
logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
UpperCamelCase__ :Union[str, Any] = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
UpperCamelCase__ :List[Any] = value
logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
UpperCamelCase__ :Any = value
logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(lowercase__ , lowercase__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
UpperCamelCase__ :Dict = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
UpperCamelCase__ :Optional[int] = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase__ )
def A ( lowercase__ : int ) -> List[str]:
UpperCamelCase__ , UpperCamelCase__ :Dict = emb.weight.shape
UpperCamelCase__ :Any = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
UpperCamelCase__ :int = emb.weight.data
return lin_layer
@torch.no_grad()
def A ( lowercase__ : Optional[int] , lowercase__ : List[str] , lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Any , lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] , lowercase__ : List[Any] , ) -> Union[str, Any]:
UpperCamelCase__ :Union[str, Any] = WavaVecaConfig.from_pretrained(
lowercase__ , add_adapter=lowercase__ , adapter_stride=lowercase__ , adapter_kernel_size=lowercase__ , use_auth_token=lowercase__ , output_hidden_size=lowercase__ , )
UpperCamelCase__ :List[str] = MBartConfig.from_pretrained(lowercase__ )
# load model
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
UpperCamelCase__ :Tuple = model[0].eval()
# load feature extractor
UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(lowercase__ , use_auth_token=lowercase__ )
# set weights for wav2vec2 encoder
UpperCamelCase__ :List[Any] = WavaVecaModel(lowercase__ )
recursively_load_weights_wavaveca(model.encoder , lowercase__ )
# load decoder weights
UpperCamelCase__ :List[str] = MBartForCausalLM(lowercase__ )
UpperCamelCase__ , UpperCamelCase__ :Tuple = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowercase__ )
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
UpperCamelCase__ :Tuple = SpeechEncoderDecoderModel(encoder=lowercase__ , decoder=lowercase__ )
UpperCamelCase__ :List[Any] = False
UpperCamelCase__ :Optional[Any] = MBartaaTokenizer(lowercase__ )
tokenizer.save_pretrained(lowercase__ )
UpperCamelCase__ :Optional[int] = hf_wavavec.config.to_dict()
UpperCamelCase__ :List[Any] = tokenizer.pad_token_id
UpperCamelCase__ :Optional[int] = tokenizer.bos_token_id
UpperCamelCase__ :Union[str, Any] = tokenizer.eos_token_id
UpperCamelCase__ :List[str] = """mbart50"""
UpperCamelCase__ :List[Any] = """wav2vec2"""
UpperCamelCase__ :int = tokenizer.eos_token_id
UpperCamelCase__ :Tuple = 25_0004
UpperCamelCase__ :Optional[Any] = tokenizer.eos_token_id
UpperCamelCase__ :Any = SpeechEncoderDecoderConfig.from_dict(lowercase__ )
hf_wavavec.save_pretrained(lowercase__ )
feature_extractor.save_pretrained(lowercase__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
UpperCamelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 45 |
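For context, a minimal sketch of loading the converted checkpoint back for inference. This is an assumption-laden illustration, not part of the script above: the save directory ./wav2vec2-mbart50 is hypothetical, and a second of silence stands in for real 16 kHz audio.
import torch
from transformers import MBart50Tokenizer, SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor
# hypothetical output directory written by the conversion script above
model = SpeechEncoderDecoderModel.from_pretrained("./wav2vec2-mbart50")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./wav2vec2-mbart50")
tokenizer = MBart50Tokenizer.from_pretrained("./wav2vec2-mbart50")
audio = torch.zeros(16_000)  # placeholder: one second of silence at 16 kHz
inputs = feature_extractor(audio, sampling_rate=16_000, return_tensors="pt")
generated_ids = model.generate(inputs.input_values)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))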
from __future__ import annotations
class XORCipher:
    """simple docstring"""
    def __init__(self, key: int = 0):
        # private field
        self.__key = key
    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True
    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 45 | 1 |
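A quick round-trip check for the cipher above; XOR with the same key is its own inverse, so decrypting the encrypted string returns the original.
crypt = XORCipher()
key = 67
secret = crypt.encrypt_string("hallo welt", key)
assert crypt.decrypt_string(secret, key) == "hallo welt"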
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45 |
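Two quick sanity checks against the known Project Euler 50 values: below 100 the answer is 41 = 2 + 3 + 5 + 7 + 11 + 13, and below 1000 it is 953, a run of 21 consecutive primes.
assert solution(100) == 41
assert solution(1000) == 953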
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = a[left], a[pivot]  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
| 45 | 1 |
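A non-interactive usage example of the randomized quicksort above; the result is the same regardless of which pivots the random number generator picks.
data = [9, 1, 8, 2, 7, 3]
quick_sort_random(data, 0, len(data))
assert data == [1, 2, 3, 7, 8, 9]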
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 45 |
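The register_subcommand pattern used above is plain argparse. A self-contained sketch of the same idea with a hypothetical command (not part of transformers):
from argparse import ArgumentParser
class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("hello", help="print a greeting")
        sub.add_argument("--name", default="world")
        # attach a factory so the generic dispatcher can build the command
        sub.set_defaults(func=lambda args: HelloCommand(args.name))
    def __init__(self, name):
        self.name = name
    def run(self):
        print(f"hello {self.name}")
parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
commands_parser = parser.add_subparsers(help="demo-cli command helpers")
HelloCommand.register_subcommand(commands_parser)
args = parser.parse_args(["hello", "--name", "CLI"])
args.func(args).run()  # prints: hello CLI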
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
_snake_case : Tuple = """dinat"""
_snake_case : List[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 45 | 1 |
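A small check of the derived attributes above, assuming the transformers DinatConfig: with the defaults (embed_dim=64, four stages) hidden_size is 64 * 2**3 = 512.
from transformers import DinatConfig
config = DinatConfig()  # defaults: embed_dim=64, depths=[3, 4, 6, 5]
assert config.hidden_size == 64 * 2 ** (len(config.depths) - 1) == 512
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]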
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    """simple docstring"""
    model_type = "new-model"
if is_tf_available():
    class TFNewModel(TFBertModel):
        """simple docstring"""
        config_class = NewModelConfig
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :str ):
UpperCamelCase__ :Optional[Any] = """bert-base-cased"""
UpperCamelCase__ :Any = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Tuple = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :List[Any] ):
UpperCamelCase__ :Dict = """bert-base-cased"""
UpperCamelCase__ :Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = TFAutoModelForPreTraining.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :List[Any] ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Any = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ :str = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :Any ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :int = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :Any ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :List[Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :List[str] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Any = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ :List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :Dict ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCamelCase__ :List[Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
UpperCamelCase__ :Tuple = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
@require_tensorflow_probability
def __a ( self :Optional[Any] ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
UpperCamelCase__ :Dict = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :str = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def __a ( self :Tuple ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
UpperCamelCase__ :Optional[Any] = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = copy.deepcopy(model.config )
UpperCamelCase__ :int = ["""FunnelBaseModel"""]
UpperCamelCase__ :Union[str, Any] = TFAutoModel.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[str] = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[Any] ):
try:
            AutoConfig.register("new-model", NewModelConfig)
UpperCamelCase__ :List[Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)
# Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")
    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(OSError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
    def test_cached_model_has_minimum_calls_to_head(self):
# Make sure we have cached the model.
UpperCamelCase__ :Tuple = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
UpperCamelCase__ :List[Any] = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
UpperCamelCase__ :Optional[int] = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
UpperCamelCase__ :Dict = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 45 |
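A standalone sketch of the register-then-resolve flow the last tests exercise; the MyConfig/TFMyModel names are hypothetical and TensorFlow must be installed.
from transformers import AutoConfig, BertConfig, TFAutoModel, TFBertModel
class MyConfig(BertConfig):
    model_type = "my-model"
class TFMyModel(TFBertModel):
    config_class = MyConfig
AutoConfig.register("my-model", MyConfig)
TFAutoModel.register(MyConfig, TFMyModel)
config = MyConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
model = TFAutoModel.from_config(config)  # resolves to TFMyModel via the registry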
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)
def main() -> None:
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 45 | 1 |
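NOR is functionally complete; for instance, tying both inputs together yields NOT, which the check below verifies against the gate above.
for x in (0, 1):
    assert nor_gate(x, x) == int(not x)  # NOR(x, x) == NOT x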
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float32)
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
UpperCamelCase__ :Optional[int] = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
UpperCamelCase__ :Tuple = 8
UpperCamelCase__ :List[str] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
UpperCamelCase__ :str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCamelCase__ :str = torch.tensor(lowercase__ )
elif key_name.startswith("""model/moe""" ):
UpperCamelCase__ :List[str] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
UpperCamelCase__ :int = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
UpperCamelCase__ :Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCamelCase__ :Optional[Any] = torch.tensor(lowercase__ )
elif key_name.endswith("""/softmlp/kernel""" ):
UpperCamelCase__ :Any = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
UpperCamelCase__ :str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCamelCase__ :Dict = torch.tensor(lowercase__ )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
UpperCamelCase__ :Dict = key_name[-9:-7]
for i in range(16 ):
UpperCamelCase__ :Optional[Any] = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
UpperCamelCase__ :int = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
UpperCamelCase__ :int = torch.tensor(lowercase__ )
elif key_name.startswith("""model/mlp""" ):
UpperCamelCase__ :Dict = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
UpperCamelCase__ :Optional[Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
UpperCamelCase__ :Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCamelCase__ :Union[str, Any] = torch.tensor(lowercase__ )
elif key_name.endswith("""/p1/bias""" ):
UpperCamelCase__ :Tuple = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
UpperCamelCase__ :Dict = vnp.copy() # same because it is one dimensional
UpperCamelCase__ :List[str] = torch.tensor(lowercase__ )
elif key_name.endswith("""/p2/kernel""" ):
UpperCamelCase__ :List[Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
UpperCamelCase__ :int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCamelCase__ :Any = torch.tensor(lowercase__ )
elif key_name.endswith("""/p2/bias""" ):
UpperCamelCase__ :Optional[int] = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
UpperCamelCase__ :List[Any] = vnp.copy() # same because it is one dimensional
UpperCamelCase__ :str = torch.tensor(lowercase__ )
elif key_name.startswith("""model/ln""" ):
UpperCamelCase__ :Optional[Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
UpperCamelCase__ :List[Any] = """model.blocks.%d.feed_forward.norm.bias""" % player
UpperCamelCase__ :List[str] = vnp.copy() # same because it is one dimensional
UpperCamelCase__ :Union[str, Any] = torch.tensor(lowercase__ )
elif key_name.endswith("""/g""" ):
UpperCamelCase__ :Any = """model.blocks.%d.feed_forward.norm.weight""" % player
UpperCamelCase__ :Dict = vnp.copy() # same because it is one dimensional
UpperCamelCase__ :List[Any] = torch.tensor(lowercase__ )
elif key_name.startswith("""model/att""" ):
UpperCamelCase__ :List[Any] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
UpperCamelCase__ :Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
UpperCamelCase__ :Union[str, Any] = state[:, 0, :, :]
UpperCamelCase__ :Union[str, Any] = state[:, 1, :, :]
UpperCamelCase__ :List[Any] = state[:, 2, :, :]
UpperCamelCase__ :str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCamelCase__ :Tuple = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCamelCase__ :Dict = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCamelCase__ :str = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
UpperCamelCase__ :List[str] = torch.tensor(lowercase__ )
UpperCamelCase__ :Optional[int] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
UpperCamelCase__ :Optional[Any] = torch.tensor(lowercase__ )
UpperCamelCase__ :List[str] = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
UpperCamelCase__ :List[str] = torch.tensor(lowercase__ )
elif key_name.endswith("""/o/kernel""" ):
UpperCamelCase__ :Dict = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
UpperCamelCase__ :str = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCamelCase__ :int = torch.tensor(lowercase__ )
elif key_name.startswith("""model/an""" ):
UpperCamelCase__ :Optional[int] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
UpperCamelCase__ :Optional[int] = """model.blocks.%d.self_attn.norm.bias""" % player
UpperCamelCase__ :Optional[int] = vnp.copy() # same because it is one dimensional
UpperCamelCase__ :str = torch.tensor(lowercase__ )
elif key_name.endswith("""/g""" ):
UpperCamelCase__ :str = """model.blocks.%d.self_attn.norm.weight""" % player
UpperCamelCase__ :Tuple = vnp.copy() # same because it is one dimensional
UpperCamelCase__ :str = torch.tensor(lowercase__ )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
UpperCamelCase__ :int = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
UpperCamelCase__ :List[Any] = """model.%s.weight""" % nlayer
UpperCamelCase__ :str = vnp.copy() # same in embedded
UpperCamelCase__ :Dict = torch.tensor(lowercase__ )
if key_name.startswith("""model/wte""" ):
UpperCamelCase__ :Any = """lm_head.weight"""
UpperCamelCase__ :Optional[int] = vnp.copy() # same in embedded
UpperCamelCase__ :Tuple = torch.tensor(lowercase__ )
elif key_name.startswith("""model/wob""" ):
UpperCamelCase__ :Dict = """final_logits_bias"""
UpperCamelCase__ :Optional[Any] = vnp.copy() # same in embedded
UpperCamelCase__ :Optional[Any] = state.reshape((1, -1) )
UpperCamelCase__ :Optional[Any] = torch.tensor(lowercase__ )
elif key_name == "model/dense/kernel":
UpperCamelCase__ :Tuple = """model.last_project.weight"""
UpperCamelCase__ :List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCamelCase__ :Tuple = torch.tensor(lowercase__ )
elif key_name == "model/dense_1/bias":
UpperCamelCase__ :List[Any] = """model.last_project.bias"""
UpperCamelCase__ :Any = vnp.copy() # same because it is one dimensional
UpperCamelCase__ :List[Any] = torch.tensor(lowercase__ )
torch.save(lowercase__ , args.output )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
UpperCamelCase = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 45 |
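The checkpoint-walking idiom at the top of the converter, isolated as a sketch; the path is a placeholder for a real TensorFlow checkpoint prefix.
import tensorflow as tf
reader = tf.train.load_checkpoint("/path/to/tf_checkpoint")
for key_name, shape in reader.get_variable_to_shape_map().items():
    print(key_name, shape)
    tensor = reader.get_tensor(key_name)  # numpy array, ready for renaming/transposing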
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
def __a ( self :Optional[int] ):
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 45 | 1 |
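The size_divisor property asserted above amounts to flooring each spatial dimension to a multiple of 32; a dependency-free sketch of that arithmetic (helper name hypothetical):
def floor_to_multiple(value: int, divisor: int = 32) -> int:
    return (value // divisor) * divisor
assert floor_to_multiple(481) == 480
assert floor_to_multiple(640) == 640  # already a multiple, unchanged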
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
return config, input_ids, input_mask, token_labels
    def get_config(self):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_snake_case : str = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_snake_case : Union[str, Any] = False
_snake_case : Dict = False
_snake_case : List[str] = False
_snake_case : Optional[int] = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)
    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)
    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
@slow
    def test_generation(self):
        model_name = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_name)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_name)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 45 |
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
        raise AssertionError("This should never happen")
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 45 | 1 |
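A worked instance of the comparison above: res(2, 10) = 10*log10(2) ≈ 3.010 and res(3, 7) = 7*log10(3) ≈ 3.340, so 3^7 wins, consistent with 1024 < 2187.
assert res(2, 10) < res(3, 7)
assert 2**10 < 3**7  # 1024 < 2187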
class Graph:  # Public class to implement a graph
    """simple docstring"""
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)
    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
| 45 |
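A usage example for the island counter above; under 8-directional connectivity this grid has five islands (the diagonal touch between rows 1 and 2 merges two blobs).
graph = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = Graph(5, 5, graph)
assert g.count_islands() == 5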
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
"""simple docstring"""
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def __a ( self :Tuple ):
UpperCamelCase__ :str = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :str = config_and_inputs
UpperCamelCase__ :Optional[Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): check whether language generation is also applicable to other models
_snake_case : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : Tuple = False
def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self :List[str] ):
UpperCamelCase__ :List[str] = TFFlaubertModelTester(self )
UpperCamelCase__ :Tuple = ConfigTester(self , config_class=lowerCamelCase__ , emb_dim=37 )
def __a ( self :int ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self :str ):
UpperCamelCase__ :Tuple = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCamelCase__ :Optional[int] = tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )[0]
UpperCamelCase__ :Optional[int] = tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , lowerCamelCase__ )
# compare the actual values for a slice.
UpperCamelCase__ :str = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 45 | 1 |
# Algorithm for the pigeonhole sorting
def A ( lowercase__ : Dict ) -> Optional[int]:
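# Pigeonhole sort counts how often each value occurs, then rewrites the list in sorted order in-place (O(n + value range)).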
UpperCamelCase__ :Tuple = min(lowercase__ ) # min() finds the minimum value
UpperCamelCase__ :Optional[int] = max(lowercase__ ) # max() finds the maximum value
UpperCamelCase__ :str = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
UpperCamelCase__ :List[Any] = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(lowercase__ , lowercase__ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
UpperCamelCase__ :Optional[int] = 0
for count in range(lowercase__ ):
while holes[count] > 0:
holes[count] -= 1
UpperCamelCase__ :Tuple = count + min_val
i += 1
def A ( ) -> Any:
UpperCamelCase__ :Dict = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(lowercase__ )
print("""Sorted order is:""" , """ """.join(lowercase__ ) )
if __name__ == "__main__":
main()
| 45 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCamelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self :List[Any] ):
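# Round-trip check: a pipeline reloaded from save_pretrained must reproduce the original outputs.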
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :Any = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[str] = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :str = generator.manual_seed(0 )
UpperCamelCase__ :str = pipe.dual_guided(
prompt="""first prompt""" , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = """cyberpunk 2077"""
UpperCamelCase__ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :str = torch.manual_seed(0 )
UpperCamelCase__ :Dict = pipe.dual_guided(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , text_to_image_strength=0.75 , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCamelCase__ :Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Any = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :List[Any] = """A painting of a squirrel eating a burger """
UpperCamelCase__ :List[str] = torch.manual_seed(0 )
UpperCamelCase__ :Optional[int] = pipe.text_to_image(
prompt=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
UpperCamelCase__ :str = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ :Optional[int] = pipe.image_variation(lowerCamelCase__ , generator=lowerCamelCase__ , output_type="""numpy""" ).images
UpperCamelCase__ :int = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :List[Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 45 | 1 |
def A ( lowercase__ : list ) -> bool:
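# Returns True if the list is an arithmetic series, i.e. consecutive terms differ by a constant amount.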
if not isinstance(lowercase__ , lowercase__ ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(lowercase__ ) == 0:
raise ValueError("""Input list must be a non empty list""" )
if len(lowercase__ ) == 1:
return True
UpperCamelCase__ :Tuple = series[1] - series[0]
for index in range(len(lowercase__ ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def A ( lowercase__ : list ) -> float:
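# Returns the mean of the series: the sum of the terms divided by their count.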
if not isinstance(lowercase__ , lowercase__ ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(lowercase__ ) == 0:
raise ValueError("""Input list must be a non empty list""" )
UpperCamelCase__ :Union[str, Any] = 0
for val in series:
answer += val
return answer / len(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ):
UpperCamelCase__ :Any = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :Union[str, Any] = patch_size
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :int = use_token_type_ids
UpperCamelCase__ :int = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :List[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :Union[str, Any] = type_sequence_label_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Tuple = shape_size
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :str = num_choices
UpperCamelCase__ :Tuple = scope
UpperCamelCase__ :str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase__ :List[str] = text_seq_length
UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1
UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length
def __a ( self :Tuple ):
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase__ :str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase__ :List[str] = bbox[i, j, 3]
UpperCamelCase__ :Optional[int] = bbox[i, j, 1]
UpperCamelCase__ :Optional[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase__ :Tuple = bbox[i, j, 2]
UpperCamelCase__ :Optional[Any] = bbox[i, j, 0]
UpperCamelCase__ :List[str] = tmp_coordinate
UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Any = None
if self.use_input_mask:
UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase__ :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase__ :Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ):
UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ )
# text + image
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , )
UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ):
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ )
UpperCamelCase__ :List[str] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = self.num_labels
UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Dict = 2
UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
UpperCamelCase__ :int = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs
UpperCamelCase__ :List[str] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : List[str] = False
_snake_case : Tuple = False
def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ):
return True
def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ):
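# Expands each input along a num_choices axis for multiple-choice models and, when requested, attaches dummy labels shaped for the task.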
UpperCamelCase__ :List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[int] = {
k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Tuple = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Any ):
self.config_tester.run_common_tests()
def __a ( self :Optional[int] ):
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ )
if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0]
]
UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase__ :Optional[Any] = -1_00
UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters
UpperCamelCase__ :str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase__ :Any = {0: """input_ids"""}
for label_key in label_keys:
UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = label_key
UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase__ :Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase__ :List[str] = prepared_for_class[value]
UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ )
# Send to model
UpperCamelCase__ :str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __a ( self :Optional[int] ):
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ :Dict = type
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Tuple ):
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :Optional[int] ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A ( ) -> List[str]:
UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Optional[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def __a ( self :Dict ):
UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
UpperCamelCase__ :List[Any] = self.default_image_processor
UpperCamelCase__ :str = prepare_img()
UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values
UpperCamelCase__ :str = tf.constant([[1, 2]] )
UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
# verify the logits
UpperCamelCase__ :int = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 45 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
UpperCamelCase = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def A ( lowercase__ : int , lowercase__ : Optional[Any] ) -> int:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
continue
item.add_marker(pytest.mark.unit )
def A ( lowercase__ : Optional[int] ) -> Any:
config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=lowercase__ )
def A ( lowercase__ : str , lowercase__ : int ) -> int:
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why doesn't a cache dir per test function work?
UpperCamelCase__ :Union[str, Any] = tmp_path_factory.getbasetemp() / """cache"""
UpperCamelCase__ :Union[str, Any] = test_hf_cache_home / """datasets"""
UpperCamelCase__ :Optional[int] = test_hf_cache_home / """metrics"""
UpperCamelCase__ :Tuple = test_hf_cache_home / """modules"""
monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(lowercase__ ) )
monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(lowercase__ ) )
monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(lowercase__ ) )
UpperCamelCase__ :Union[str, Any] = test_hf_datasets_cache / """downloads"""
monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(lowercase__ ) )
UpperCamelCase__ :Optional[Any] = test_hf_datasets_cache / """downloads""" / """extracted"""
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(lowercase__ ) )
@pytest.fixture(autouse=lowercase__ , scope="""session""" )
def A ( ) -> Optional[Any]:
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase__ )
def A ( lowercase__ : Tuple ) -> int:
# don't take tests into account when counting downloads
monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , lowercase__ )
@pytest.fixture
def A ( lowercase__ : Optional[int] ) -> Dict:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , lowercase__ )
| 45 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : Optional[str] = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The column name of the images in the files."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""} )
_snake_case : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __a ( self :List[str] ):
UpperCamelCase__ :Optional[Any] = {}
if self.train_dir is not None:
UpperCamelCase__ :int = self.train_dir
if self.validation_dir is not None:
UpperCamelCase__ :List[str] = self.validation_dir
UpperCamelCase__ :Optional[int] = data_files if data_files else None
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : str = field(
default=lowercase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""} )
_snake_case : bool = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_snake_case : float = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : float = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def A ( lowercase__ : Union[str, Any] ) -> Dict:
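# Data collator: stack the per-example pixel tensors into a single batch tensor for the Trainer.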
UpperCamelCase__ :Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def A ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ :List[str] = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCamelCase__ :Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ :List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCamelCase__ :Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase__ :int = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0:
UpperCamelCase__ :Optional[Any] = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCamelCase__ :Union[str, Any] = split["""train"""]
UpperCamelCase__ :Any = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :Optional[int] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase__ :Any = ViTMAEConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Optional[Any] = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase__ :str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Tuple = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase__ :Any = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase__ :Optional[int] = ViTMAEForPreTraining(lowercase__ )
if training_args.do_train:
UpperCamelCase__ :Optional[Any] = ds["""train"""].column_names
else:
UpperCamelCase__ :Union[str, Any] = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCamelCase__ :Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase__ :Optional[Any] = """image"""
elif "img" in column_names:
UpperCamelCase__ :List[str] = """img"""
else:
UpperCamelCase__ :List[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase__ :List[str] = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase__ :int = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase__ :Any = Compose(
[
Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(lowercase__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(lowercase__ : Tuple ):
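# Apply the MAE augmentation pipeline to every image in the incoming batch.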
UpperCamelCase__ :List[Any] = [transforms(image ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCamelCase__ :Optional[int] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowercase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCamelCase__ :Optional[Any] = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowercase__ )
# Compute absolute learning rate
UpperCamelCase__ :Tuple = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase__ :Any = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
UpperCamelCase__ :Union[str, Any] = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
UpperCamelCase__ :Any = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ :int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ :Dict = last_checkpoint
UpperCamelCase__ :Union[str, Any] = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ :int = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase__ )
trainer.save_metrics("""eval""" , lowercase__ )
# Write model card and (optionally) push to hub
UpperCamelCase__ :Optional[int] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def A ( lowercase__ : Union[str, Any] ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 45 | 1 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"b0": efficientnet.EfficientNetBa,
"b1": efficientnet.EfficientNetBa,
"b2": efficientnet.EfficientNetBa,
"b3": efficientnet.EfficientNetBa,
"b4": efficientnet.EfficientNetBa,
"b5": efficientnet.EfficientNetBa,
"b6": efficientnet.EfficientNetBa,
"b7": efficientnet.EfficientNetBa,
}
UpperCamelCase = {
"b0": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1_408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1_536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1_792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2_048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2_304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2_560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def A ( lowercase__ : Union[str, Any] ) -> int:
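# Build an EfficientNet config for the requested variant and attach the ImageNet-1k id/label mappings.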
UpperCamelCase__ :Optional[int] = EfficientNetConfig()
UpperCamelCase__ :Union[str, Any] = CONFIG_MAP[model_name]["""hidden_dim"""]
UpperCamelCase__ :int = CONFIG_MAP[model_name]["""width_coef"""]
UpperCamelCase__ :int = CONFIG_MAP[model_name]["""depth_coef"""]
UpperCamelCase__ :List[Any] = CONFIG_MAP[model_name]["""image_size"""]
UpperCamelCase__ :List[str] = CONFIG_MAP[model_name]["""dropout_rate"""]
UpperCamelCase__ :Optional[int] = CONFIG_MAP[model_name]["""dw_padding"""]
UpperCamelCase__ :str = """huggingface/label-files"""
UpperCamelCase__ :str = """imagenet-1k-id2label.json"""
UpperCamelCase__ :Tuple = 1000
UpperCamelCase__ :Any = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
UpperCamelCase__ :Union[str, Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
UpperCamelCase__ :Any = idalabel
UpperCamelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def A ( ) -> Optional[Any]:
UpperCamelCase__ :Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCamelCase__ :int = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def A ( lowercase__ : int ) -> List[str]:
UpperCamelCase__ :Union[str, Any] = CONFIG_MAP[model_name]["""image_size"""]
UpperCamelCase__ :Any = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=lowercase__ , )
return preprocessor
def A ( lowercase__ : Union[str, Any] ) -> Any:
UpperCamelCase__ :Optional[int] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
UpperCamelCase__ :Union[str, Any] = sorted(set(lowercase__ ) )
UpperCamelCase__ :Any = len(lowercase__ )
UpperCamelCase__ :Tuple = {b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
UpperCamelCase__ :List[Any] = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
UpperCamelCase__ :str = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
UpperCamelCase__ :Tuple = {}
for item in rename_keys:
if item[0] in original_param_names:
UpperCamelCase__ :List[str] = """efficientnet.""" + item[1]
UpperCamelCase__ :Dict = """classifier.weight"""
UpperCamelCase__ :int = """classifier.bias"""
return key_mapping
def A ( lowercase__ : str , lowercase__ : str , lowercase__ : List[str] ) -> List[str]:
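# Copy each TF parameter into the matching HF tensor, permuting convolution kernels into PyTorch's channel-first layout.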
for key, value in tf_params.items():
if "normalization" in key:
continue
UpperCamelCase__ :Union[str, Any] = key_mapping[key]
if "_conv" in key and "kernel" in key:
UpperCamelCase__ :int = torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
UpperCamelCase__ :List[str] = torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
UpperCamelCase__ :Any = torch.from_numpy(np.transpose(lowercase__ ) )
else:
UpperCamelCase__ :Any = torch.from_numpy(lowercase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase__ )
@torch.no_grad()
def A ( lowercase__ : Optional[Any] , lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : int ) -> Dict:
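# End-to-end conversion: load the original Keras model, port its weights into the HF model, and check that both produce matching logits.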
UpperCamelCase__ :str = model_classes[model_name](
include_top=lowercase__ , weights="""imagenet""" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1000 , classifier_activation="""softmax""" , )
UpperCamelCase__ :Optional[int] = original_model.trainable_variables
UpperCamelCase__ :Optional[Any] = original_model.non_trainable_variables
UpperCamelCase__ :Tuple = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
UpperCamelCase__ :int = param.numpy()
UpperCamelCase__ :Optional[int] = list(tf_params.keys() )
# Load HuggingFace model
UpperCamelCase__ :str = get_efficientnet_config(lowercase__ )
UpperCamelCase__ :Any = EfficientNetForImageClassification(lowercase__ ).eval()
UpperCamelCase__ :List[Any] = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
UpperCamelCase__ :Union[str, Any] = rename_keys(lowercase__ )
replace_params(lowercase__ , lowercase__ , lowercase__ )
# Initialize preprocessor and preprocess input image
UpperCamelCase__ :int = convert_image_processor(lowercase__ )
UpperCamelCase__ :Any = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
UpperCamelCase__ :Dict = hf_model(**lowercase__ )
UpperCamelCase__ :Union[str, Any] = outputs.logits.detach().numpy()
# Original model inference
UpperCamelCase__ :str = False
UpperCamelCase__ :Tuple = CONFIG_MAP[model_name]["""image_size"""]
UpperCamelCase__ :Optional[int] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
UpperCamelCase__ :Any = image.img_to_array(lowercase__ )
UpperCamelCase__ :Union[str, Any] = np.expand_dims(lowercase__ , axis=0 )
UpperCamelCase__ :List[Any] = original_model.predict(lowercase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase__ , lowercase__ , atol=1E-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase__ ):
os.mkdir(lowercase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase__ )
preprocessor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model and image processor to hub
print(f"""Pushing converted {model_name} to the hub...""" )
UpperCamelCase__ :Dict = f"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowercase__ )
hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
UpperCamelCase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
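# A minimal standalone sketch (not part of the script above) of why the kernel
# permutations at the top of this snippet work: TF stores conv kernels as
# (H, W, in, out) and depthwise kernels as (H, W, in, multiplier), while
# PyTorch expects (out, in, H, W) and (in, multiplier, H, W). The shapes below
# are illustrative assumptions.
import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 32, 64), dtype=np.float32)   # TF conv2d: (H, W, in, out)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_kernel.shape == (64, 32, 3, 3)                 # PyTorch: (out, in, H, W)

tf_dw = np.zeros((3, 3, 32, 1), dtype=np.float32)        # TF depthwise: (H, W, in, mult)
pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)
assert pt_dw.shape == (32, 1, 3, 3)                      # PyTorch depthwise: (in, mult, H, W)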
| 45 |
from __future__ import annotations
def A ( lowercase__ : int ) -> list[int]:
UpperCamelCase__ :Union[str, Any] = [True] * limit
UpperCamelCase__ :int = False
UpperCamelCase__ :Optional[Any] = False
UpperCamelCase__ :str = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
UpperCamelCase__ :List[Any] = i * 2
while index < limit:
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Tuple = index + i
UpperCamelCase__ :str = [2]
for i in range(3 , lowercase__ , 2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
def A ( lowercase__ : int = 100_0000 ) -> int:
UpperCamelCase__ :Any = prime_sieve(lowercase__ )
UpperCamelCase__ :Optional[int] = 0
UpperCamelCase__ :Optional[Any] = 0
for i in range(len(lowercase__ ) ):
for j in range(i + length , len(lowercase__ ) ):
UpperCamelCase__ :Any = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
UpperCamelCase__ :Union[str, Any] = j - i
UpperCamelCase__ :Any = sol
return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
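# A hedged sketch of the same Project Euler 50 search as solution() above,
# assuming a prime_sieve(limit) -> list[int] helper like the one in this
# snippet; a set makes the membership test O(1) instead of O(n) on a list.
def longest_consecutive_prime_sum(ceiling: int = 100_0000) -> int:
    primes = prime_sieve(ceiling)
    prime_set = set(primes)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            total = sum(primes[i:j])
            if total >= ceiling:
                break
            if total in prime_set:
                length = j - i
                largest = total
    return largest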
| 45 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
UpperCamelCase = parser.parse_args()
UpperCamelCase = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
UpperCamelCase = CLIPImageProcessor()
UpperCamelCase = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
UpperCamelCase = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
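# A hedged usage sketch for the image-variation pipeline assembled above; the
# dump path and input image URL are placeholders, not values from this script.
import torch
from diffusers import UnCLIPImageVariationPipeline
from diffusers.utils import load_image

pipe = UnCLIPImageVariationPipeline.from_pretrained("path/to/dump")  # i.e. args.dump_path
image = load_image("https://example.com/input.jpg")                  # placeholder image
out = pipe(image=image, generator=torch.manual_seed(0)).images[0]
out.save("variation.png")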
| 45 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple=13 , lowerCamelCase__ :Tuple=7 , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :List[str]=99 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[Any]=5 , lowerCamelCase__ :Tuple=4 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :str="gelu" , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :Optional[int]=0.1 , lowerCamelCase__ :str=True , lowerCamelCase__ :Dict=5_12 , lowerCamelCase__ :Optional[Any]=16 , lowerCamelCase__ :Optional[Any]=2 , lowerCamelCase__ :Union[str, Any]=0.02 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :int=4 , lowerCamelCase__ :str=None , ):
UpperCamelCase__ :Optional[Any] = parent
UpperCamelCase__ :Dict = batch_size
UpperCamelCase__ :Tuple = seq_length
UpperCamelCase__ :Dict = is_training
UpperCamelCase__ :List[str] = use_input_mask
UpperCamelCase__ :Optional[Any] = use_token_type_ids
UpperCamelCase__ :Tuple = use_labels
UpperCamelCase__ :int = vocab_size
UpperCamelCase__ :Tuple = hidden_size
UpperCamelCase__ :Optional[Any] = num_hidden_layers
UpperCamelCase__ :int = num_attention_heads
UpperCamelCase__ :Optional[int] = intermediate_multiple_size
UpperCamelCase__ :Optional[Any] = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout
UpperCamelCase__ :List[Any] = attention_dropout
UpperCamelCase__ :List[str] = weight_tying
UpperCamelCase__ :List[str] = max_position_embeddings
UpperCamelCase__ :Dict = type_vocab_size
UpperCamelCase__ :List[Any] = type_sequence_label_size
UpperCamelCase__ :List[str] = initializer_range
UpperCamelCase__ :int = num_labels
UpperCamelCase__ :Dict = num_choices
UpperCamelCase__ :Any = scope
def __a ( self :Any ):
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :str = None
if self.use_input_mask:
UpperCamelCase__ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __a ( self :Union[str, Any] ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.prepare_config_and_inputs()
UpperCamelCase__ :Optional[int] = True
return config, input_ids, input_mask, token_labels
def __a ( self :List[str] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any ):
UpperCamelCase__ :Union[str, Any] = GPTNeoXJapaneseModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[Any] ):
UpperCamelCase__ :List[str] = True
UpperCamelCase__ :int = GPTNeoXJapaneseModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :List[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] ):
UpperCamelCase__ :Any = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :List[str] = GPTNeoXJapaneseForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# first forward pass
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
UpperCamelCase__ :List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCamelCase__ :Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ :Union[str, Any] = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ :int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ :str = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ :Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def __a ( self :Tuple ):
UpperCamelCase__ :int = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[Any] = config_and_inputs
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_snake_case : int = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_snake_case : str = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_snake_case : Union[str, Any] = False
_snake_case : Dict = False
_snake_case : List[str] = False
_snake_case : Optional[int] = False
def __a ( self :List[Any] ):
UpperCamelCase__ :Tuple = GPTNeoXJapaneseModelTester(self )
UpperCamelCase__ :Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Dict ):
self.config_tester.run_common_tests()
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ :Dict = None
self.model_tester.create_and_check_model_as_decoder(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase__ )
@slow
def __a ( self :int ):
UpperCamelCase__ :int = """abeja/gpt-neox-japanese-2.7b"""
UpperCamelCase__ :List[Any] = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
UpperCamelCase__ :Union[str, Any] = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
UpperCamelCase__ :Any = GPTNeoXJapaneseTokenizer.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = []
for prompt in prompts:
UpperCamelCase__ :str = tokenizer(lowerCamelCase__ , return_tensors="""pt""" ).input_ids
UpperCamelCase__ :Union[str, Any] = model.generate(lowerCamelCase__ , max_length=50 )
UpperCamelCase__ :Dict = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
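# A minimal standalone sketch of the past-key-values equivalence property the
# decoder test above checks, assuming any HF causal LM with cache support;
# the model and tensors are placeholders.
import torch

def check_cache_equivalence(model, input_ids, next_tokens, atol=1e-3):
    out = model(input_ids, use_cache=True)
    cached = model(next_tokens, past_key_values=out.past_key_values)
    full = model(torch.cat([input_ids, next_tokens], dim=-1))
    # logits for the appended tokens must match with and without the cache
    return torch.allclose(full.logits[:, -next_tokens.shape[1] :], cached.logits, atol=atol)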
| 45 | 1 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
UpperCamelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ :str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
UpperCamelCase__ :List[Any] = torch.manual_seed(0 )
UpperCamelCase__ :Any = pipe(
image=lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
UpperCamelCase__ :Optional[int] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ :Any = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
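# A small sketch of the slice-based regression check used above: compare a
# fixed 3x3 window of the generated array against stored reference values
# instead of the full 512x512 image; the values here are placeholders.
import numpy as np

image = np.random.rand(1, 512, 512, 3)        # stand-in for pipe(...).images
image_slice = image[0, 253:256, 253:256, -1]  # 3x3 window of the last channel
expected_slice = image_slice.flatten()        # placeholder reference values
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2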
| 45 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def A ( lowercase__ : dict ) -> tuple:
return (data["data"], data["target"])
def A ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> XGBClassifier:
UpperCamelCase__ :Tuple = XGBClassifier()
classifier.fit(lowercase__ , lowercase__ )
return classifier
def A ( ) -> None:
UpperCamelCase__ :str = load_iris()
UpperCamelCase__ , UpperCamelCase__ :int = data_handling(lowercase__ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :int = train_test_split(
lowercase__ , lowercase__ , test_size=0.25 )
UpperCamelCase__ :Optional[int] = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
UpperCamelCase__ :Optional[Any] = xgboost(lowercase__ , lowercase__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowercase__ , lowercase__ , lowercase__ , display_labels=lowercase__ , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
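# A hedged extension of the flow above: score the classifier on the held-out
# split with accuracy_score (the snippet only plots the confusion matrix).
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier

data = load_iris()
x_train, x_test, y_train, y_test = train_test_split(
    data["data"], data["target"], test_size=0.25
)
clf = XGBClassifier().fit(x_train, y_train)
print(accuracy_score(y_test, clf.predict(x_test)))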
| 45 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Tuple = BertTokenizer
_snake_case : str = BertTokenizerFast
_snake_case : Optional[Any] = True
_snake_case : Optional[int] = True
_snake_case : Dict = filter_non_english
def __a ( self :List[str] ):
super().setUp()
UpperCamelCase__ :str = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCamelCase__ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __a ( self :Tuple , lowerCamelCase__ :Dict ):
UpperCamelCase__ :Tuple = """UNwant\u00E9d,running"""
UpperCamelCase__ :Tuple = """unwanted, running"""
return input_text, output_text
def __a ( self :int ):
UpperCamelCase__ :Optional[Any] = self.tokenizer_class(self.vocab_file )
UpperCamelCase__ :Union[str, Any] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCamelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [9, 6, 7, 12, 10, 11] )
def __a ( self :Dict ):
if not self.test_rust_tokenizer:
return
UpperCamelCase__ :Tuple = self.get_tokenizer()
UpperCamelCase__ :Dict = self.get_rust_tokenizer()
UpperCamelCase__ :str = """UNwant\u00E9d,running"""
UpperCamelCase__ :Optional[int] = tokenizer.tokenize(lowerCamelCase__ )
UpperCamelCase__ :List[str] = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :int = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = self.get_rust_tokenizer()
UpperCamelCase__ :str = tokenizer.encode(lowerCamelCase__ )
UpperCamelCase__ :Any = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# With lower casing
UpperCamelCase__ :Dict = self.get_tokenizer(do_lower_case=lowerCamelCase__ )
UpperCamelCase__ :List[Any] = self.get_rust_tokenizer(do_lower_case=lowerCamelCase__ )
UpperCamelCase__ :List[str] = """UNwant\u00E9d,running"""
UpperCamelCase__ :List[Any] = tokenizer.tokenize(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Dict = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase__ :Optional[Any] = tokenizer.encode(lowerCamelCase__ )
UpperCamelCase__ :Any = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :str ):
UpperCamelCase__ :str = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :str = BasicTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __a ( self :str ):
UpperCamelCase__ :Optional[int] = BasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Any = BasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __a ( self :int ):
UpperCamelCase__ :int = BasicTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __a ( self :List[str] ):
UpperCamelCase__ :Optional[Any] = BasicTokenizer(do_lower_case=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __a ( self :List[Any] ):
UpperCamelCase__ :List[str] = BasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = BasicTokenizer(do_lower_case=lowerCamelCase__ , strip_accents=lowerCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __a ( self :str ):
UpperCamelCase__ :str = BasicTokenizer(do_lower_case=lowerCamelCase__ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __a ( self :str ):
UpperCamelCase__ :List[str] = BasicTokenizer()
UpperCamelCase__ :List[str] = """a\n'll !!to?'d of, can't."""
UpperCamelCase__ :Dict = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""]
self.assertListEqual(tokenizer.tokenize(lowerCamelCase__ ) , lowerCamelCase__ )
def __a ( self :Optional[Any] ):
UpperCamelCase__ :str = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
UpperCamelCase__ :Optional[Any] = {}
for i, token in enumerate(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = i
UpperCamelCase__ :Optional[int] = WordpieceTokenizer(vocab=lowerCamelCase__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __a ( self :Optional[int] ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __a ( self :Optional[int] ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __a ( self :List[Any] ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __a ( self :Tuple ):
UpperCamelCase__ :List[str] = self.get_tokenizer()
UpperCamelCase__ :Dict = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCamelCase__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCamelCase__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def __a ( self :List[str] ):
UpperCamelCase__ :Optional[int] = self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
UpperCamelCase__ :str = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :List[str] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
UpperCamelCase__ :Dict = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def __a ( self :int ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase__ :Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
UpperCamelCase__ :Any = tokenizer_r.encode_plus(
lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , )
UpperCamelCase__ :Optional[Any] = tokenizer_r.do_lower_case if hasattr(lowerCamelCase__ , """do_lower_case""" ) else False
UpperCamelCase__ :Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __a ( self :Dict ):
UpperCamelCase__ :Any = ["""的""", """人""", """有"""]
UpperCamelCase__ :Optional[Any] = """""".join(lowerCamelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase__ :str = True
UpperCamelCase__ :int = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ :List[str] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ :Tuple = tokenizer_p.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = tokenizer_r.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :List[str] = tokenizer_r.convert_ids_to_tokens(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = tokenizer_p.convert_ids_to_tokens(lowerCamelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = False
UpperCamelCase__ :str = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ :int = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = tokenizer_r.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :str = tokenizer_p.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCamelCase__ )
UpperCamelCase__ :Dict = tokenizer_p.convert_ids_to_tokens(lowerCamelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCamelCase__ :Any = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCamelCase__ )
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
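# A minimal sketch of the greedy longest-match WordPiece behaviour the tests
# above exercise, built on the same toy-vocabulary convention.
from transformers.models.bert.tokenization_bert import WordpieceTokenizer

vocab = {"un": 0, "##want": 1, "##ed": 2, "runn": 3, "##ing": 4, "[UNK]": 5}
tok = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
print(tok.tokenize("unwanted running"))   # ['un', '##want', '##ed', 'runn', '##ing']
print(tok.tokenize("unwantedX running"))  # ['[UNK]', 'runn', '##ing']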
| 45 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A ( lowercase__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase__ :Union[str, Any] = {}
UpperCamelCase__ :Optional[int] = tokenizer(example["""content"""] , truncation=lowercase__ )["""input_ids"""]
UpperCamelCase__ :int = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
UpperCamelCase = HfArgumentParser(PretokenizationArguments)
UpperCamelCase = parser.parse_args()
if args.num_workers is None:
UpperCamelCase = multiprocessing.cpu_count()
UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCamelCase = time.time()
UpperCamelCase = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
UpperCamelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCamelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
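# A hedged sketch of the chars-per-token ratio computed by tokenize() above,
# on a toy input; the checkpoint name is a placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder checkpoint
content = "def add(a, b):\n    return a + b\n"
input_ids = tokenizer(content, truncation=True)["input_ids"]
print(len(content) / len(input_ids))  # characters per token, the ratio stored by tokenize()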
| 45 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Tuple = ["""image_processor""", """tokenizer"""]
_snake_case : int = """OwlViTImageProcessor"""
_snake_case : Optional[Any] = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self :Dict , lowerCamelCase__ :Tuple=None , lowerCamelCase__ :Tuple=None , **lowerCamelCase__ :List[str] ):
UpperCamelCase__ :str = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCamelCase__ , )
UpperCamelCase__ :Optional[int] = kwargs.pop("""feature_extractor""" )
UpperCamelCase__ :Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self :Union[str, Any] , lowerCamelCase__ :Optional[Any]=None , lowerCamelCase__ :Tuple=None , lowerCamelCase__ :Optional[int]=None , lowerCamelCase__ :Union[str, Any]="max_length" , lowerCamelCase__ :Dict="np" , **lowerCamelCase__ :Tuple ):
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) or (isinstance(lowerCamelCase__ , lowerCamelCase__ ) and not isinstance(text[0] , lowerCamelCase__ )):
UpperCamelCase__ :Any = [self.tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )]
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(text[0] , lowerCamelCase__ ):
UpperCamelCase__ :Any = []
# Maximum number of queries across batch
UpperCamelCase__ :Dict = max([len(lowerCamelCase__ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(lowerCamelCase__ ) != max_num_queries:
UpperCamelCase__ :Optional[Any] = t + [""" """] * (max_num_queries - len(lowerCamelCase__ ))
UpperCamelCase__ :int = self.tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
encodings.append(lowerCamelCase__ )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
UpperCamelCase__ :Any = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCamelCase__ :Any = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCamelCase__ :Dict = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCamelCase__ :Union[str, Any] = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCamelCase__ :Optional[Any] = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
UpperCamelCase__ :Any = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCamelCase__ :Optional[int] = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCamelCase__ :Optional[Any] = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
UpperCamelCase__ :Optional[int] = BatchEncoding()
UpperCamelCase__ :List[Any] = input_ids
UpperCamelCase__ :Optional[int] = attention_mask
if query_images is not None:
UpperCamelCase__ :List[Any] = BatchEncoding()
UpperCamelCase__ :int = self.image_processor(
lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ).pixel_values
UpperCamelCase__ :List[Any] = query_pixel_values
if images is not None:
UpperCamelCase__ :Union[str, Any] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None and images is not None:
UpperCamelCase__ :int = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCamelCase__ :Dict = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase__ ) , tensor_type=lowerCamelCase__ )
def __a ( self :List[Any] , *lowerCamelCase__ :Optional[Any] , **lowerCamelCase__ :List[str] ):
return self.image_processor.post_process(*lowerCamelCase__ , **lowerCamelCase__ )
def __a ( self :List[str] , *lowerCamelCase__ :Union[str, Any] , **lowerCamelCase__ :Tuple ):
return self.image_processor.post_process_object_detection(*lowerCamelCase__ , **lowerCamelCase__ )
def __a ( self :Optional[int] , *lowerCamelCase__ :Tuple , **lowerCamelCase__ :int ):
return self.image_processor.post_process_image_guided_detection(*lowerCamelCase__ , **lowerCamelCase__ )
def __a ( self :int , *lowerCamelCase__ :Dict , **lowerCamelCase__ :Any ):
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def __a ( self :List[Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :List[Any] ):
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
def __a ( self :List[Any] ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCamelCase__ , )
return self.image_processor_class
@property
def __a ( self :Dict ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCamelCase__ , )
return self.image_processor
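# A hedged usage sketch for the OwlViT processor above; the checkpoint name
# and query strings are placeholders.
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (768, 768))
inputs = processor(text=[["a cat", "a dog"]], images=image, return_tensors="pt")
print(inputs.input_ids.shape, inputs.pixel_values.shape)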
| 45 |
def A ( lowercase__ : int ) -> Optional[Any]:
stooge(lowercase__ , 0 , len(lowercase__ ) - 1 )
return arr
def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : str ) -> List[str]:
if i >= h:
return
    # If the first element is larger than the last, swap them
if arr[i] > arr[h]:
UpperCamelCase__ , UpperCamelCase__ :List[str] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        UpperCamelCase__ :Optional[int] = (h - i + 1) // 3
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowercase__ , i + t , (lowercase__) )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
if __name__ == "__main__":
UpperCamelCase = input("Enter numbers separated by a comma:\n").strip()
UpperCamelCase = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
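# A de-obfuscated reference version of the same algorithm for readability:
# recursively sort the first 2/3, the last 2/3, then the first 2/3 again,
# giving roughly O(n^2.71) time.
def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge(arr, i, h - t)
        stooge(arr, i + t, h)
        stooge(arr, i, h - t)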
| 45 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCamelCase = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] , lowerCamelCase__ :Path , lowerCamelCase__ :Union[str, None] = None , lowerCamelCase__ :Union[List[str], None] = None , lowerCamelCase__ :Union[str, List[str], None] = None , lowerCamelCase__ :bool = True , ):
UpperCamelCase__ :Dict = [file for file in os.listdir(lowerCamelCase__ ) if os.path.isfile(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) )]
if identifier is not None:
UpperCamelCase__ :Union[str, Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
for n_ in n_identifier:
UpperCamelCase__ :Tuple = [file for file in files if n_ not in file]
else:
UpperCamelCase__ :int = [file for file in files if n_identifier not in file]
UpperCamelCase__ :List[Any] = ignore_files or []
ignore_files.append("""__init__.py""" )
UpperCamelCase__ :Dict = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , lowerCamelCase__ )
if only_modules:
UpperCamelCase__ :int = file.split(""".""" )[0]
try:
UpperCamelCase__ :Tuple = getattr(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = doctest.DocTestSuite(lowerCamelCase__ )
UpperCamelCase__ :Tuple = unittest.TextTestRunner().run(lowerCamelCase__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""" )
else:
UpperCamelCase__ :Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def __a ( self :Optional[int] ):
UpperCamelCase__ :int = Path("""src/transformers""" )
UpperCamelCase__ :Union[str, Any] = """modeling"""
UpperCamelCase__ :List[Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(lowerCamelCase__ , identifier=lowerCamelCase__ , ignore_files=lowerCamelCase__ )
def __a ( self :Optional[Any] ):
UpperCamelCase__ :Tuple = Path("""src/transformers""" )
UpperCamelCase__ :int = """tokenization"""
self.analyze_directory(lowerCamelCase__ , identifier=lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Tuple = Path("""src/transformers""" )
UpperCamelCase__ :List[str] = """configuration"""
self.analyze_directory(lowerCamelCase__ , identifier=lowerCamelCase__ )
def __a ( self :int ):
UpperCamelCase__ :Optional[int] = Path("""src/transformers""" )
UpperCamelCase__ :int = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(lowerCamelCase__ , n_identifier=lowerCamelCase__ )
def __a ( self :Optional[Any] ):
UpperCamelCase__ :int = Path("""docs/source""" )
UpperCamelCase__ :str = ["""favicon.ico"""]
self.analyze_directory(lowerCamelCase__ , ignore_files=lowerCamelCase__ , only_modules=lowerCamelCase__ )
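# A minimal sketch of the doctest machinery the analyzer above drives: collect
# the examples embedded in docstrings and execute them.
import doctest

def square(x: int) -> int:
    """
    >>> square(3)
    9
    """
    return x * x

result = doctest.testmod(verbose=False)
print(result.attempted, result.failed)  # 1 0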
| 45 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCamelCase = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def A ( lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Dict ) -> List[Any]:
UpperCamelCase__ :str = SavedModel()
UpperCamelCase__ :List[str] = []
with open(os.path.join(lowercase__ , """utils""" , """tf_ops""" , """onnx.json""" ) ) as f:
UpperCamelCase__ :str = json.load(lowercase__ )["""opsets"""]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(lowercase__ )] )
with open(lowercase__ , """rb""" ) as f:
saved_model.ParseFromString(f.read() )
UpperCamelCase__ :Tuple = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
    # Convert the set of op names to a sorted list
UpperCamelCase__ :Union[str, Any] = sorted(lowercase__ )
UpperCamelCase__ :List[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(lowercase__ )
if strict and len(lowercase__ ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(lowercase__ ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*lowercase__ , sep="""\n""" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
UpperCamelCase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
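# A hedged sketch of the op-collection step above on a freshly traced graph,
# assuming TensorFlow 2.x; the tiny function stands in for a saved model.
import tensorflow as tf

@tf.function
def f(x):
    return tf.nn.relu(x + 1.0)

graph_def = f.get_concrete_function(tf.TensorSpec([None], tf.float32)).graph.as_graph_def()
print(sorted({node.op for node in graph_def.node}))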
| 45 | 1 |
from math import factorial
UpperCamelCase = {str(d): factorial(d) for d in range(10)}
def A ( lowercase__ : int ) -> int:
return sum(DIGIT_FACTORIAL[d] for d in str(lowercase__ ) )
def A ( ) -> int:
UpperCamelCase__ :Dict = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , lowercase__ ) if sum_of_digit_factorial(lowercase__ ) == i )
if __name__ == "__main__":
print(f'''{solution() = }''')
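# The classic worked example behind the digit-factorial sum above:
# 145 = 1! + 4! + 5! = 1 + 24 + 120, so 145 is one of the terms counted.
from math import factorial
assert sum(factorial(int(d)) for d in "145") == 145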
| 45 |
from __future__ import annotations
def A ( lowercase__ : str , lowercase__ : list[str] | None = None , lowercase__ : dict[str, float] | None = None , lowercase__ : bool = False , ) -> tuple[int, float, str]:
UpperCamelCase__ :Dict = cipher_alphabet or [chr(lowercase__ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
UpperCamelCase__ :Optional[Any] = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
else:
# Custom frequencies dictionary
UpperCamelCase__ :Optional[int] = frequencies_dict
if not case_sensitive:
UpperCamelCase__ :int = ciphertext.lower()
# Chi squared statistic values
UpperCamelCase__ :dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(lowercase__ ) ):
UpperCamelCase__ :int = """"""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
UpperCamelCase__ :int = (alphabet_letters.index(letter.lower() ) - shift) % len(
lowercase__ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
UpperCamelCase__ :Optional[int] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
UpperCamelCase__ :Optional[int] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :Optional[int] = decrypted_with_shift.lower().count(lowercase__ )
                # Get the expected amount of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :Dict = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
UpperCamelCase__ :List[str] = decrypted_with_shift.count(lowercase__ )
                # Get the expected amount of times the letter should appear based
# on letter frequencies
UpperCamelCase__ :Union[str, Any] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
UpperCamelCase__ :List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
UpperCamelCase__ :Union[str, Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(lowercase__ : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
UpperCamelCase__ :int = min(
lowercase__ , key=lowercase__ , )
# Get all the data from the most likely cipher (key, decoded message)
    UpperCamelCase__ , UpperCamelCase__ :Tuple = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
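# A simplified sketch of the chi-squared scoring idea above, assuming expected
# counts come from english letter frequencies times the message length (the
# function above uses a per-letter variant of this formula).
def chi_squared(text: str, frequencies: dict[str, float]) -> float:
    total = 0.0
    for letter in set(text):
        if letter in frequencies:
            observed = text.count(letter)
            expected = frequencies[letter] * len(text)
            total += (observed - expected) ** 2 / expected
    return total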
| 45 | 1 |
def A ( lowercase__ : int , lowercase__ : int ) -> int:
return int(input_a == input_a == 0 )
def A ( ) -> None:
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
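# NOR is NOT(OR): a quick cross-check of the truth table printed by main().
for a in (0, 1):
    for b in (0, 1):
        assert int(a == b == 0) == int(not (a or b))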
| 45 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *lowerCamelCase__ :Optional[int] , **lowerCamelCase__ :Dict ):
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 45 | 1 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def A ( lowercase__ : str , lowercase__ : str ) -> str | Literal[False]:
UpperCamelCase__ :Any = list(lowercase__ )
UpperCamelCase__ :Optional[Any] = list(lowercase__ )
UpperCamelCase__ :Union[str, Any] = 0
for i in range(len(lowercase__ ) ):
if lista[i] != lista[i]:
count += 1
UpperCamelCase__ :str = """_"""
if count > 1:
return False
else:
return "".join(lowercase__ )
def A ( lowercase__ : list[str] ) -> list[str]:
UpperCamelCase__ :int = []
while True:
UpperCamelCase__ :Tuple = ["""$"""] * len(lowercase__ )
UpperCamelCase__ :Tuple = []
for i in range(len(lowercase__ ) ):
for j in range(i + 1 , len(lowercase__ ) ):
UpperCamelCase__ :str = compare_string(binary[i] , binary[j] )
if k is False:
UpperCamelCase__ :str = """*"""
UpperCamelCase__ :Union[str, Any] = """*"""
temp.append("""X""" )
for i in range(len(lowercase__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowercase__ ) == 0:
return pi
UpperCamelCase__ :Tuple = list(set(lowercase__ ) )
def A ( lowercase__ : int , lowercase__ : Sequence[float] ) -> list[str]:
UpperCamelCase__ :int = []
for minterm in minterms:
UpperCamelCase__ :Any = """"""
for _ in range(lowercase__ ):
UpperCamelCase__ :Any = str(minterm % 2 ) + string
minterm //= 2
temp.append(lowercase__ )
return temp
def A ( lowercase__ : str , lowercase__ : str , lowercase__ : int ) -> bool:
UpperCamelCase__ :Tuple = list(lowercase__ )
UpperCamelCase__ :List[str] = list(lowercase__ )
UpperCamelCase__ :Any = 0
for i in range(len(lowercase__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def A ( lowercase__ : list[list[int]] , lowercase__ : list[str] ) -> list[str]:
UpperCamelCase__ :int = []
UpperCamelCase__ :Union[str, Any] = [0] * len(lowercase__ )
for i in range(len(chart[0] ) ):
UpperCamelCase__ :str = 0
UpperCamelCase__ :Dict = -1
for j in range(len(lowercase__ ) ):
if chart[j][i] == 1:
count += 1
UpperCamelCase__ :int = j
if count == 1:
UpperCamelCase__ :Optional[Any] = 1
for i in range(len(lowercase__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(lowercase__ ) ):
UpperCamelCase__ :Tuple = 0
temp.append(prime_implicants[i] )
while True:
UpperCamelCase__ :int = 0
UpperCamelCase__ :List[Any] = -1
UpperCamelCase__ :Any = 0
for i in range(len(lowercase__ ) ):
UpperCamelCase__ :Dict = chart[i].count(1 )
if count_n > max_n:
UpperCamelCase__ :List[Any] = count_n
UpperCamelCase__ :Optional[int] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(lowercase__ ) ):
UpperCamelCase__ :Optional[int] = 0
def A ( lowercase__ : list[str] , lowercase__ : list[str] ) -> list[list[int]]:
UpperCamelCase__ :Union[str, Any] = [[0 for x in range(len(lowercase__ ) )] for x in range(len(lowercase__ ) )]
for i in range(len(lowercase__ ) ):
UpperCamelCase__ :Union[str, Any] = prime_implicants[i].count("""_""" )
for j in range(len(lowercase__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , lowercase__ ):
UpperCamelCase__ :Union[str, Any] = 1
return chart
def A ( ) -> None:
UpperCamelCase__ :List[str] = int(input("""Enter the no. of variables\n""" ) )
UpperCamelCase__ :Union[str, Any] = [
float(lowercase__ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
UpperCamelCase__ :List[Any] = decimal_to_binary(lowercase__ , lowercase__ )
UpperCamelCase__ :int = check(lowercase__ )
print("""Prime Implicants are:""" )
print(lowercase__ )
UpperCamelCase__ :List[str] = prime_implicant_chart(lowercase__ , lowercase__ )
UpperCamelCase__ :Tuple = selection(lowercase__ , lowercase__ )
print("""Essential Prime Implicants are:""" )
print(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
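# A standalone worked example of the pairwise-combination step above for
# f(A, B, C) = sum of minterms (1, 3, 5); independent of the helpers in this
# snippet so it can run on its own.
from __future__ import annotations

def combine(a: str, b: str) -> str | None:
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    return a[: diff[0]] + "_" + a[diff[0] + 1 :] if len(diff) == 1 else None

minterms = ["001", "011", "101"]
merged = [combine(a, b) for i, a in enumerate(minterms) for b in minterms[i + 1 :]]
print([m for m in merged if m])  # ['0_1', '_01'], i.e. the implicants A'C and B'C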
| 45 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase = get_tests_dir("fixtures")
UpperCamelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCamelCase = get_tests_dir("fixtures/dummy-config.json")
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[int] = 0
def __a ( self :str ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ :List[str] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCamelCase__ :Tuple = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
UpperCamelCase__ :Union[str, Any] = WavaVecaFeatureExtractor(**lowerCamelCase__ )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
config.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
# make sure private variable is not incorrectly saved
UpperCamelCase__ :Tuple = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Dict ):
with self.assertRaisesRegex(
lowerCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
UpperCamelCase__ :Dict = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def __a ( self :List[Any] ):
with self.assertRaisesRegex(
lowerCamelCase__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , revision="""aaaaaa""" )
def __a ( self :int ):
with self.assertRaisesRegex(
lowerCamelCase__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __a ( self :Optional[int] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :Any = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def __a ( self :Dict ):
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCamelCase__ :Any = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCamelCase__ )
UpperCamelCase__ :List[Any] = AutoFeatureExtractor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __a ( self :Optional[int] ):
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Optional[int] = True
try:
AutoConfig.register("""custom""" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local
UpperCamelCase__ :Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCamelCase__ :str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCamelCase__ :Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(lowerCamelCase__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 45 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=lowercase )
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
_snake_case : ClassVar[Features] = Features({"""image""": Image()} )
_snake_case : ClassVar[Features] = Features({"""labels""": ClassLabel} )
_snake_case : str = "image"
_snake_case : str = "labels"
def __a ( self :Any , lowerCamelCase__ :Dict ):
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , lowerCamelCase__ ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
UpperCamelCase__ :Union[str, Any] = copy.deepcopy(self )
UpperCamelCase__ :Any = self.label_schema.copy()
UpperCamelCase__ :int = features[self.label_column]
UpperCamelCase__ :Optional[Any] = label_schema
return task_template
@property
def __a ( self :List[Any] ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 45 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ):
super().__init__()
UpperCamelCase__ :Tuple = value_function
UpperCamelCase__ :Optional[int] = unet
UpperCamelCase__ :List[str] = scheduler
UpperCamelCase__ :Dict = env
UpperCamelCase__ :Dict = env.get_dataset()
UpperCamelCase__ :Union[str, Any] = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].mean()
except: # noqa: E722
pass
UpperCamelCase__ :Any = {}
for key in self.data.keys():
try:
UpperCamelCase__ :int = self.data[key].std()
except: # noqa: E722
pass
UpperCamelCase__ :List[Any] = env.observation_space.shape[0]
UpperCamelCase__ :List[str] = env.action_space.shape[0]
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ):
return (x_in - self.means[key]) / self.stds[key]
def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
return x_in * self.stds[key] + self.means[key]
def __a ( self :Any , lowerCamelCase__ :int ):
if type(lowerCamelCase__ ) is dict:
return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ):
for key, val in cond.items():
UpperCamelCase__ :str = val.clone()
return x_in
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ):
UpperCamelCase__ :Any = x.shape[0]
UpperCamelCase__ :List[Any] = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0]
UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ )
UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance )
UpperCamelCase__ :Dict = model_std * grad
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Dict = x.detach()
UpperCamelCase__ :int = x + scale * grad
UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ )
return x, y
def __call__( self :Optional[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ):
# normalize the observations and create batch dimension
UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" )
UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 )
UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )}
UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device )
UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
UpperCamelCase__ :Dict = x[sorted_idx]
UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim]
UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy()
UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" )
# select the action with the highest value
if y is not None:
UpperCamelCase__ :List[str] = 0
else:
# if we didn't run value guiding, select a random action
UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ )
UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0]
return denorm_actions
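# Usage sketch (illustrative only; the mangled assignment names above mean this
# class is not runnable as-is, and the argument names below are assumptions
# about the intended interface, not a confirmed API):
#   planner = <this pipeline>(value_function, unet, scheduler, env)
#   action = planner(obs, batch_size=64, planning_horizon=32)
# Internally: noisy trajectories are denoised with value-gradient guidance,
# sorted by predicted value, and the first action of the best one is returned.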
| 45 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def A ( lowercase__ : Union[str, Any] , lowercase__ : List[str]=False ) -> Dict:
UpperCamelCase__ :Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCamelCase__ :Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def A ( lowercase__ : Union[str, Any] , lowercase__ : int , lowercase__ : Any=False ) -> List[Any]:
for i in range(config.num_hidden_layers ):
if base_model:
UpperCamelCase__ :Dict = """"""
else:
UpperCamelCase__ :str = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ :List[str] = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
UpperCamelCase__ :Any = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ :List[str] = in_proj_weight[
: config.hidden_size, :
]
UpperCamelCase__ :List[str] = in_proj_bias[: config.hidden_size]
UpperCamelCase__ :Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ :Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ :Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ :Union[str, Any] = in_proj_bias[-config.hidden_size :]
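# Shape sanity check (illustrative, assuming hidden_size=768 as in ViT-Base):
# timm stores a fused qkv weight of shape (3*768, 768) = (2304, 768); the three
# slices above yield query/key/value weights of (768, 768) each and biases of
# length 768.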
def A ( lowercase__ : Any ) -> Optional[Any]:
UpperCamelCase__ :Optional[Any] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def A ( lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : Any ) -> Optional[Any]:
UpperCamelCase__ :Union[str, Any] = dct.pop(lowercase__ )
UpperCamelCase__ :Tuple = val
def A ( ) -> Any:
UpperCamelCase__ :List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCamelCase__ :Union[str, Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def A ( lowercase__ : Any , lowercase__ : Optional[int] ) -> Optional[Any]:
UpperCamelCase__ :Optional[int] = ViTConfig()
UpperCamelCase__ :List[Any] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
UpperCamelCase__ :Union[str, Any] = True
UpperCamelCase__ :int = int(vit_name[-12:-10] )
UpperCamelCase__ :Any = int(vit_name[-9:-6] )
else:
UpperCamelCase__ :Optional[Any] = 1000
UpperCamelCase__ :int = """huggingface/label-files"""
UpperCamelCase__ :Any = """imagenet-1k-id2label.json"""
UpperCamelCase__ :int = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
UpperCamelCase__ :List[str] = {int(lowercase__ ): v for k, v in idalabel.items()}
UpperCamelCase__ :Union[str, Any] = idalabel
UpperCamelCase__ :Optional[int] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ :Optional[Any] = int(vit_name[-6:-4] )
UpperCamelCase__ :Optional[int] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
UpperCamelCase__ :Optional[int] = 192
UpperCamelCase__ :Tuple = 768
UpperCamelCase__ :List[Any] = 12
UpperCamelCase__ :Optional[Any] = 3
elif vit_name[9:].startswith("""small""" ):
UpperCamelCase__ :int = 384
UpperCamelCase__ :Optional[int] = 1536
UpperCamelCase__ :Any = 12
UpperCamelCase__ :Optional[int] = 6
else:
pass
else:
if vit_name[4:].startswith("""small""" ):
UpperCamelCase__ :Optional[int] = 768
UpperCamelCase__ :Union[str, Any] = 2304
UpperCamelCase__ :Optional[Any] = 8
UpperCamelCase__ :int = 8
elif vit_name[4:].startswith("""base""" ):
pass
elif vit_name[4:].startswith("""large""" ):
UpperCamelCase__ :List[str] = 1024
UpperCamelCase__ :Optional[int] = 4096
UpperCamelCase__ :Union[str, Any] = 24
UpperCamelCase__ :List[Any] = 16
elif vit_name[4:].startswith("""huge""" ):
UpperCamelCase__ :int = 1280
UpperCamelCase__ :Union[str, Any] = 5120
UpperCamelCase__ :Tuple = 32
UpperCamelCase__ :Optional[Any] = 16
# load original model from timm
UpperCamelCase__ :Optional[int] = timm.create_model(lowercase__ , pretrained=lowercase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCamelCase__ :Any = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase__ )
UpperCamelCase__ :Optional[int] = create_rename_keys(lowercase__ , lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ , lowercase__ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
UpperCamelCase__ :int = ViTModel(lowercase__ ).eval()
else:
UpperCamelCase__ :List[str] = ViTForImageClassification(lowercase__ ).eval()
model.load_state_dict(lowercase__ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
UpperCamelCase__ :List[str] = DeiTImageProcessor(size=config.image_size )
else:
UpperCamelCase__ :str = ViTImageProcessor(size=config.image_size )
UpperCamelCase__ :List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
UpperCamelCase__ :Union[str, Any] = encoding["""pixel_values"""]
UpperCamelCase__ :Union[str, Any] = model(lowercase__ )
if base_model:
UpperCamelCase__ :str = timm_model.forward_features(lowercase__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase__ , outputs.pooler_output , atol=1E-3 )
else:
UpperCamelCase__ :int = timm_model(lowercase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCamelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 45 |
def A ( lowercase__ : int ) -> bool:
if num < 0:
return False
UpperCamelCase__ :int = num
UpperCamelCase__ :int = 0
while num > 0:
UpperCamelCase__ :Optional[int] = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
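# Worked example (illustrative, assuming the intended digit-reversal logic with
# consistent variable names): 121 reverses to 121 -> True; 10 reverses to 1 ->
# False; any negative input returns False immediately.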
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
from __future__ import annotations
def A ( lowercase__ : int ) -> list[int]:
UpperCamelCase__ :Tuple = 2
UpperCamelCase__ :Dict = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(lowercase__ )
if n > 1:
factors.append(lowercase__ )
return factors
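# Worked example (illustrative, assuming the intended trial-division logic):
# n=100 divides by 2 twice (100 -> 50 -> 25), then by 5 twice (25 -> 5 -> 1),
# yielding [2, 2, 5, 5].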
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> bool:
return len(set(lowercase__ ) ) == len(lowercase__ )
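# e.g. (illustrative): [1, 2, 3] -> True, while [1, 2, 2] -> False, since the
# set collapses the duplicate and shortens the length.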
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
_snake_case : Tuple = """dinat"""
_snake_case : List[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase__ :int=4 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :List[Any]=64 , lowerCamelCase__ :Any=[3, 4, 6, 5] , lowerCamelCase__ :Tuple=[2, 4, 8, 16] , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase__ :Tuple=3.0 , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :Optional[Any]=0.02 , lowerCamelCase__ :Union[str, Any]=1e-5 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :str=None , **lowerCamelCase__ :List[Any] , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :Any = patch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :int = embed_dim
UpperCamelCase__ :Optional[Any] = depths
UpperCamelCase__ :Any = len(lowerCamelCase__ )
UpperCamelCase__ :str = num_heads
UpperCamelCase__ :Optional[int] = kernel_size
UpperCamelCase__ :Optional[int] = dilations
UpperCamelCase__ :Tuple = mlp_ratio
UpperCamelCase__ :Dict = qkv_bias
UpperCamelCase__ :List[str] = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = drop_path_rate
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :List[Any] = layer_norm_eps
UpperCamelCase__ :Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase__ :Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
UpperCamelCase__ :Tuple = layer_scale_init_value
UpperCamelCase__ :Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ :List[str] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
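# e.g. (illustrative): with the defaults embed_dim=64 and depths=[3, 4, 6, 5]
# (four stages), hidden_size = 64 * 2**3 = 512, the channel width after the
# last stage.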
| 45 |
from __future__ import annotations
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCamelCase__ :int = 0 ):
UpperCamelCase__ :List[str] = key
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :List[str] = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
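# Illustrative (assuming the intended per-character XOR, chr(ord(ch) ^ key)):
# with key=1, "a" (0x61) maps to chr(0x61 ^ 1) = "`", and XORing again with the
# same key restores "a" -- encryption and decryption are the same operation.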
def __a ( self :int , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :int = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase__ ) ^ key ) for ch in content]
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Dict = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :List[str] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Any , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ :Tuple = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
UpperCamelCase__ :Optional[int] = """"""
for ch in content:
ans += chr(ord(lowerCamelCase__ ) ^ key )
return ans
def __a ( self :Optional[Any] , lowerCamelCase__ :str , lowerCamelCase__ :int = 0 ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int ):
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) and isinstance(lowerCamelCase__ , lowerCamelCase__ )
try:
with open(lowerCamelCase__ ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(lowerCamelCase__ , lowerCamelCase__ ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 45 | 1 |
UpperCamelCase = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCamelCase = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCamelCase = {
0: "Sunday",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
}
def A ( lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> str:
assert len(str(lowercase__ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
UpperCamelCase__ :List[Any] = year // 100
UpperCamelCase__ :Optional[int] = (5 * (century % 4) + 2) % 7
UpperCamelCase__ :int = year % 100
UpperCamelCase__ :List[Any] = centurian % 12
UpperCamelCase__ :List[str] = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
UpperCamelCase__ :Tuple = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
UpperCamelCase__ :Optional[int] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
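# Worked example (illustrative, assuming the intended variable flow):
# 2020-10-24 -> century=20, century_anchor=(5*0+2)%7=2, centurian=20,
# centurian_m=8, dooms_day=(1+8+2+2)%7=6; 2020 is a leap year, so
# day_anchor=DOOMSDAY_LEAP[9]=3 and week_day=(6+24-3)%7=6 -> "Saturday".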
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
import random
def A ( lowercase__ : Dict , lowercase__ : str , lowercase__ : Optional[Any] ) -> int:
UpperCamelCase__ :List[Any] = a[left_index]
UpperCamelCase__ :Dict = left_index + 1
for j in range(left_index + 1 , lowercase__ ):
if a[j] < pivot:
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = a[i], a[j]
i += 1
UpperCamelCase__ , UpperCamelCase__ :Tuple = a[i - 1], a[left_index]
return i - 1
def A ( lowercase__ : Tuple , lowercase__ : Optional[int] , lowercase__ : Any ) -> Optional[int]:
if left < right:
UpperCamelCase__ :List[Any] = random.randint(lowercase__ , right - 1 )
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
UpperCamelCase__ :int = partition(lowercase__ , lowercase__ , lowercase__ )
quick_sort_random(
lowercase__ , lowercase__ , lowercase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowercase__ , pivot_index + 1 , lowercase__ ) # recursive quicksort to the right of the pivot point
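# Illustrative trace (assuming the intended Lomuto-style logic): for [3, 1, 2]
# with the random pivot 3 swapped to the front, partition() leaves [2, 1, 3]
# and returns pivot index 2; recursion then sorts [2, 1] and the empty suffix.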
def A ( ) -> List[Any]:
UpperCamelCase__ :str = input("""Enter numbers separated by a comma:\n""" ).strip()
UpperCamelCase__ :int = [int(lowercase__ ) for item in user_input.split(""",""" )]
quick_sort_random(lowercase__ , 0 , len(lowercase__ ) )
print(lowercase__ )
if __name__ == "__main__":
main()
| 45 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def A ( lowercase__ : int ) -> Optional[int]:
UpperCamelCase__ :Dict = FileLock(str(tmpdir / """foo.lock""" ) )
UpperCamelCase__ :str = FileLock(str(tmpdir / """foo.lock""" ) )
UpperCamelCase__ :List[str] = 0.01
with locka.acquire():
with pytest.raises(lowercase__ ):
UpperCamelCase__ :Optional[Any] = time.time()
locka.acquire(lowercase__ )
assert time.time() - _start > timeout
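# i.e. (illustrative): the second handle's acquire() blocks for at least the
# 0.01 s timeout on the already-held lock, then raises Timeout, so the elapsed
# time must exceed the timeout.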
def A ( lowercase__ : str ) -> int:
UpperCamelCase__ :str = """a""" * 1000 + """.lock"""
UpperCamelCase__ :str = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(lowercase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
UpperCamelCase__ :List[Any] = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(lowercase__ ):
locka.acquire(0 )
| 45 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class lowerCAmelCase_ ( lowercase , lowercase ):
"""simple docstring"""
_snake_case : Tuple = """dinat"""
_snake_case : List[Any] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :Optional[int] , lowerCamelCase__ :int=4 , lowerCamelCase__ :Union[str, Any]=3 , lowerCamelCase__ :List[Any]=64 , lowerCamelCase__ :Any=[3, 4, 6, 5] , lowerCamelCase__ :Tuple=[2, 4, 8, 16] , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :Tuple=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCamelCase__ :Tuple=3.0 , lowerCamelCase__ :str=True , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :Optional[Any]=0.0 , lowerCamelCase__ :int=0.1 , lowerCamelCase__ :Optional[Any]="gelu" , lowerCamelCase__ :Optional[Any]=0.02 , lowerCamelCase__ :Union[str, Any]=1e-5 , lowerCamelCase__ :Optional[int]=0.0 , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :str=None , **lowerCamelCase__ :List[Any] , ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :Any = patch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :int = embed_dim
UpperCamelCase__ :Optional[Any] = depths
UpperCamelCase__ :Any = len(lowerCamelCase__ )
UpperCamelCase__ :str = num_heads
UpperCamelCase__ :Optional[int] = kernel_size
UpperCamelCase__ :Optional[int] = dilations
UpperCamelCase__ :Tuple = mlp_ratio
UpperCamelCase__ :Dict = qkv_bias
UpperCamelCase__ :List[str] = hidden_dropout_prob
UpperCamelCase__ :List[str] = attention_probs_dropout_prob
UpperCamelCase__ :Union[str, Any] = drop_path_rate
UpperCamelCase__ :Tuple = hidden_act
UpperCamelCase__ :List[Any] = layer_norm_eps
UpperCamelCase__ :Optional[Any] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase__ :Tuple = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
UpperCamelCase__ :Tuple = layer_scale_init_value
UpperCamelCase__ :Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
UpperCamelCase__ , UpperCamelCase__ :List[str] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
| 45 | 1 |
from __future__ import annotations
from PIL import Image
# Define glider example
UpperCamelCase = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
UpperCamelCase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def A ( lowercase__ : list[list[int]] ) -> list[list[int]]:
UpperCamelCase__ :List[str] = []
for i in range(len(lowercase__ ) ):
UpperCamelCase__ :Optional[int] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
UpperCamelCase__ :Union[str, Any] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowercase__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowercase__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowercase__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
UpperCamelCase__ :List[str] = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(lowercase__ )
return next_generation
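# Worked example (illustrative): one step turns the vertical BLINKER
# [[0, 1, 0], [0, 1, 0], [0, 1, 0]] into the horizontal
# [[0, 0, 0], [1, 1, 1], [0, 0, 0]] under the survival/birth rules above.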
def A ( lowercase__ : list[list[int]] , lowercase__ : int ) -> list[Image.Image]:
UpperCamelCase__ :Tuple = []
for _ in range(lowercase__ ):
# Create output image
UpperCamelCase__ :str = Image.new("""RGB""" , (len(cells[0] ), len(lowercase__ )) )
UpperCamelCase__ :Any = img.load()
# Save cells to image
for x in range(len(lowercase__ ) ):
for y in range(len(cells[0] ) ):
UpperCamelCase__ :Dict = 255 - cells[y][x] * 255
UpperCamelCase__ :Union[str, Any] = (colour, colour, colour)
# Save image
images.append(lowercase__ )
UpperCamelCase__ :Union[str, Any] = new_generation(lowercase__ )
return images
if __name__ == "__main__":
UpperCamelCase = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| 45 |
def A ( lowercase__ : int , lowercase__ : int ) -> int:
return int(input_a == input_a == 0 )
def A ( ) -> None:
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 45 | 1 |
from __future__ import annotations
import math
from collections.abc import Callable
def A ( lowercase__ : Callable[[int | float], int | float] , lowercase__ : int | float , lowercase__ : int | float , lowercase__ : int = 100 , ) -> float:
UpperCamelCase__ :int = x_start
UpperCamelCase__ :List[Any] = fnc(lowercase__ )
UpperCamelCase__ :List[Any] = 0.0
for _ in range(lowercase__ ):
# Approximates curve as a sequence of linear lines and sums their length
UpperCamelCase__ :Optional[int] = (x_end - x_start) / steps + xa
UpperCamelCase__ :List[Any] = fnc(lowercase__ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
UpperCamelCase__ :Dict = xa
UpperCamelCase__ :List[str] = fxa
return length
if __name__ == "__main__":
def A ( lowercase__ : Dict ) -> List[str]:
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
UpperCamelCase = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 45 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any]=7 , lowerCamelCase__ :str=3 , lowerCamelCase__ :Optional[Any]=18 , lowerCamelCase__ :List[str]=30 , lowerCamelCase__ :str=4_00 , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :Union[str, Any]=32 , lowerCamelCase__ :int=True , ):
UpperCamelCase__ :List[Any] = parent
UpperCamelCase__ :List[Any] = batch_size
UpperCamelCase__ :Any = num_channels
UpperCamelCase__ :List[str] = image_size
UpperCamelCase__ :Dict = min_resolution
UpperCamelCase__ :List[str] = max_resolution
UpperCamelCase__ :str = do_resize
UpperCamelCase__ :int = size_divisor
UpperCamelCase__ :Optional[int] = do_rescale
def __a ( self :str ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Optional[int] = GLPNImageProcessor if is_vision_available() else None
def __a ( self :Dict ):
UpperCamelCase__ :Dict = GLPNImageProcessingTester(self )
@property
def __a ( self :List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :Optional[int] ):
UpperCamelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size_divisor""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """resample""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_rescale""" ) )
def __a ( self :Optional[int] ):
pass
def __a ( self :Tuple ):
# Initialize image_processing
UpperCamelCase__ :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
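# i.e. (illustrative): with size_divisor=32, the processor resizes inputs so
# that both spatial dimensions become multiples of 32, which the two
# shape-modulo checks above verify.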
def __a ( self :str ):
# Initialize image_processing
UpperCamelCase__ :str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __a ( self :Any ):
# Initialize image_processing
UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
UpperCamelCase__ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 45 | 1 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :Collection[float] | None = None ):
if components is None:
UpperCamelCase__ :Tuple = []
UpperCamelCase__ :Optional[Any] = list(lowerCamelCase__ )
def __len__( self :List[Any] ):
return len(self.__components )
def __str__( self :List[Any] ):
return "(" + ",".join(map(lowerCamelCase__ , self.__components ) ) + ")"
def __add__( self :List[Any] , lowerCamelCase__ :Vector ):
UpperCamelCase__ :Optional[int] = len(self )
if size == len(lowerCamelCase__ ):
UpperCamelCase__ :List[Any] = [self.__components[i] + other.component(lowerCamelCase__ ) for i in range(lowerCamelCase__ )]
return Vector(lowerCamelCase__ )
else:
raise Exception("""must have the same size""" )
def __sub__( self :Any , lowerCamelCase__ :Vector ):
UpperCamelCase__ :List[Any] = len(self )
if size == len(lowerCamelCase__ ):
UpperCamelCase__ :Dict = [self.__components[i] - other.component(lowerCamelCase__ ) for i in range(lowerCamelCase__ )]
return Vector(lowerCamelCase__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self :str , lowerCamelCase__ :float ):
...
@overload
def __mul__( self :Union[str, Any] , lowerCamelCase__ :Vector ):
...
def __mul__( self :List[Any] , lowerCamelCase__ :float | Vector ):
if isinstance(lowerCamelCase__ , (float, int) ):
UpperCamelCase__ :int = [c * other for c in self.__components]
return Vector(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(self ) == len(lowerCamelCase__ ):
UpperCamelCase__ :Tuple = len(self )
UpperCamelCase__ :List[str] = [self.__components[i] * other.component(lowerCamelCase__ ) for i in range(lowerCamelCase__ )]
return sum(lowerCamelCase__ )
else: # error case
raise Exception("""invalid operand!""" )
def __a ( self :Dict ):
return Vector(self.__components )
def __a ( self :Optional[Any] , lowerCamelCase__ :int ):
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def __a ( self :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :float ):
assert -len(self.__components ) <= pos < len(self.__components )
UpperCamelCase__ :List[Any] = value
def __a ( self :Optional[Any] ):
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
UpperCamelCase__ :List[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCamelCase__ ) )
def __a ( self :Any , lowerCamelCase__ :Vector , lowerCamelCase__ :bool = False ):
UpperCamelCase__ :Tuple = self * other
UpperCamelCase__ :Tuple = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def A ( lowercase__ : int ) -> Vector:
assert isinstance(lowercase__ , lowercase__ )
return Vector([0] * dimension )
def A ( lowercase__ : int , lowercase__ : int ) -> Vector:
assert isinstance(lowercase__ , lowercase__ ) and (isinstance(lowercase__ , lowercase__ ))
UpperCamelCase__ :List[Any] = [0] * dimension
UpperCamelCase__ :int = 1
return Vector(lowercase__ )
def A ( lowercase__ : float , lowercase__ : Vector , lowercase__ : Vector ) -> Vector:
assert (
isinstance(lowercase__ , lowercase__ )
and isinstance(lowercase__ , lowercase__ )
and (isinstance(lowercase__ , (int, float) ))
)
return x * scalar + y
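# Illustrative: axpy computes scalar * x + y componentwise, e.g.
# 2 * (1, 2, 3) + (1, 1, 1) = (3, 5, 7), assuming the Vector semantics above.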
def A ( lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> Vector:
random.seed(lowercase__ )
UpperCamelCase__ :Tuple = [random.randint(lowercase__ , lowercase__ ) for _ in range(lowercase__ )]
return Vector(lowercase__ )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :List[str] , lowerCamelCase__ :list[list[float]] , lowerCamelCase__ :int , lowerCamelCase__ :int ):
UpperCamelCase__ :List[Any] = matrix
UpperCamelCase__ :Dict = w
UpperCamelCase__ :int = h
def __str__( self :Tuple ):
UpperCamelCase__ :Tuple = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self :List[str] , lowerCamelCase__ :Matrix ):
if self.__width == other.width() and self.__height == other.height():
UpperCamelCase__ :Any = []
for i in range(self.__height ):
UpperCamelCase__ :Dict = [
self.__matrix[i][j] + other.component(lowerCamelCase__ , lowerCamelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCamelCase__ )
return Matrix(lowerCamelCase__ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self :List[Any] , lowerCamelCase__ :Matrix ):
if self.__width == other.width() and self.__height == other.height():
UpperCamelCase__ :List[Any] = []
for i in range(self.__height ):
UpperCamelCase__ :Optional[int] = [
self.__matrix[i][j] - other.component(lowerCamelCase__ , lowerCamelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCamelCase__ )
return Matrix(lowerCamelCase__ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self :Dict , lowerCamelCase__ :float ):
...
@overload
def __mul__( self :Optional[int] , lowerCamelCase__ :Vector ):
...
def __mul__( self :Optional[Any] , lowerCamelCase__ :float | Vector ):
if isinstance(lowerCamelCase__ , lowerCamelCase__ ): # matrix-vector
if len(lowerCamelCase__ ) == self.__width:
UpperCamelCase__ :List[Any] = zero_vector(self.__height )
for i in range(self.__height ):
UpperCamelCase__ :Union[str, Any] = [
self.__matrix[i][j] * other.component(lowerCamelCase__ )
for j in range(self.__width )
]
ans.change_component(lowerCamelCase__ , sum(lowerCamelCase__ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(lowerCamelCase__ , (int, float) ): # matrix-scalar
UpperCamelCase__ :str = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCamelCase__ , self.__width , self.__height )
return None
def __a ( self :Dict ):
return self.__height
def __a ( self :int ):
return self.__width
def __a ( self :List[str] , lowerCamelCase__ :int , lowerCamelCase__ :int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def __a ( self :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
UpperCamelCase__ :List[str] = value
else:
raise Exception("""change_component: indices out of bounds""" )
def __a ( self :int , lowerCamelCase__ :int , lowerCamelCase__ :int ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
UpperCamelCase__ :Any = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCamelCase__ ) ):
UpperCamelCase__ :Any = minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def __a ( self :str , lowerCamelCase__ :int , lowerCamelCase__ :int ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCamelCase__ , lowerCamelCase__ )
else:
raise Exception("""Indices out of bounds""" )
def __a ( self :int ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
UpperCamelCase__ :str = [
self.__matrix[0][y] * self.cofactor(0 , lowerCamelCase__ ) for y in range(self.__width )
]
return sum(lowerCamelCase__ )
def A ( lowercase__ : int ) -> Matrix:
UpperCamelCase__ :list[list[float]] = [[0] * n for _ in range(lowercase__ )]
return Matrix(lowercase__ , lowercase__ , lowercase__ )
def A ( lowercase__ : int , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> Matrix:
random.seed(lowercase__ )
UpperCamelCase__ :list[list[float]] = [
[random.randint(lowercase__ , lowercase__ ) for _ in range(lowercase__ )] for _ in range(lowercase__ )
]
return Matrix(lowercase__ , lowercase__ , lowercase__ )
| 45 |
import math
def A ( lowercase__ : Tuple , lowercase__ : Union[str, Any] ) -> Optional[Any]:
if 0 not in (x, y):
# Compare x^y values via their logarithms: log10(x^y) = y * log10(x).
return y * math.logaa(lowercase__ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("""This should never happen""" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase = "Enter the base and the power separated by a comma: "
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase = res(xa, ya)
UpperCamelCase = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
| 45 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = torch.device("cpu")
def A ( ) -> Dict:
UpperCamelCase__ :Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCamelCase__ :Union[str, Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def A ( lowercase__ : Tuple ) -> Any:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03E00, 2.11_07E00, -2.08_11E00, 8.86_85E-01, 2.43_60E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36E-01, 2.34_78E-01, -1.69_63E00, -1.73_81E00, -8.63_37E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68E-01, -4.74_29E-01, -1.08_97E00, -1.02_48E00, 3.55_23E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30E-01, 2.42_11E-01, -6.01_85E-01, -8.27_89E-01, -6.04_46E-02] )
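# (Illustrative note: each tensor holds the first five ImageNet logits expected
# from the corresponding checkpoint; they act as a numerical fingerprint for
# the torch.allclose comparison in the conversion routine below.)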
def A ( lowercase__ : Dict , lowercase__ : int , lowercase__ : Dict ) -> List[Any]:
UpperCamelCase__ :Tuple = dct.pop(lowercase__ )
UpperCamelCase__ :Any = val
def A ( lowercase__ : Union[str, Any] ) -> int:
UpperCamelCase__ :int = []
for k in state_dict.keys():
UpperCamelCase__ :Dict = k
if ".pwconv" in k:
UpperCamelCase__ :Any = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
UpperCamelCase__ :str = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
UpperCamelCase__ :Union[str, Any] = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
UpperCamelCase__ :int = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
UpperCamelCase__ :Union[str, Any] = k_new.split(""".""" )
if ls[2].isdigit():
UpperCamelCase__ :List[str] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
UpperCamelCase__ :List[Any] = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def A ( lowercase__ : str , lowercase__ : Tuple , lowercase__ : List[Any] ) -> List[Any]:
UpperCamelCase__ :str = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
UpperCamelCase__ :Any = 1000
UpperCamelCase__ :int = """huggingface/label-files"""
UpperCamelCase__ :Dict = """imagenet-1k-id2label.json"""
UpperCamelCase__ :Dict = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
UpperCamelCase__ :str = {int(lowercase__ ): v for k, v in idalabel.items()}
UpperCamelCase__ :Optional[int] = idalabel
UpperCamelCase__ :str = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
UpperCamelCase__ :Optional[int] = [3, 3, 6, 4]
UpperCamelCase__ :Dict = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
UpperCamelCase__ :Tuple = [3, 3, 9, 6]
UpperCamelCase__ :Tuple = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
UpperCamelCase__ :int = [4, 3, 10, 5]
UpperCamelCase__ :Dict = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
UpperCamelCase__ :Any = [4, 4, 12, 6]
UpperCamelCase__ :Union[str, Any] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
UpperCamelCase__ :Any = torch.hub.load_state_dict_from_url(lowercase__ , map_location="""cpu""" , check_hash=lowercase__ )
else:
UpperCamelCase__ :str = torch.load(lowercase__ , map_location="""cpu""" )
UpperCamelCase__ :Optional[Any] = checkpoint
UpperCamelCase__ :Optional[int] = create_rename_keys(lowercase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
# load HuggingFace model
UpperCamelCase__ :Optional[int] = SwiftFormerForImageClassification(lowercase__ ).eval()
hf_model.load_state_dict(lowercase__ )
# prepare test inputs
UpperCamelCase__ :Dict = prepare_img()
UpperCamelCase__ :int = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
UpperCamelCase__ :Union[str, Any] = processor(images=lowercase__ , return_tensors="""pt""" )
# compare outputs from both models
UpperCamelCase__ :int = get_expected_output(lowercase__ )
UpperCamelCase__ :Dict = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , lowercase__ , atol=1E-3 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
UpperCamelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 45 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :int = 13
UpperCamelCase__ :Optional[int] = 7
UpperCamelCase__ :Dict = True
UpperCamelCase__ :Dict = True
UpperCamelCase__ :str = True
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :Any = True
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :List[str] = 99
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Any = 32
UpperCamelCase__ :List[str] = 2
UpperCamelCase__ :int = 4
UpperCamelCase__ :List[str] = 0.1
UpperCamelCase__ :Union[str, Any] = 0.1
UpperCamelCase__ :Union[str, Any] = 5_12
UpperCamelCase__ :List[str] = 16
UpperCamelCase__ :str = 2
UpperCamelCase__ :Optional[int] = 0.02
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :Optional[int] = 4
UpperCamelCase__ :Optional[int] = """last"""
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :int = None
UpperCamelCase__ :Dict = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict


@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 45 | 1 |
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # stop popping at "(" so a parenthesised group is not drained early
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    reversed_infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 45 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device

# keep matmuls deterministic across runs (reconstructed assignment target;
# the original module-level name was mangled to "UpperCamelCase")
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
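

# Condensed usage sketch of the three pipeline entry points the integration
# tests above exercise; the model id, image URL, and keyword arguments all
# mirror those tests rather than being additional official examples. The
# guard keeps the sketch from running during test collection.
if __name__ == "__main__" and torch.cuda.is_available():
    demo_pipe = VersatileDiffusionPipeline.from_pretrained(
        "shi-labs/versatile-diffusion", torch_dtype=torch.float16
    )
    demo_pipe.to(torch_device)
    demo_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
    )
    demo_generator = torch.manual_seed(0)
    # text-conditioned generation, image variation, and jointly guided generation
    text_images = demo_pipe.text_to_image(
        prompt="A painting of a squirrel eating a burger", generator=demo_generator,
        guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
    ).images
    variation_images = demo_pipe.image_variation(demo_image, generator=demo_generator, output_type="numpy").images
    dual_images = demo_pipe.dual_guided(
        prompt="cyberpunk 2077", image=demo_image, text_to_image_strength=0.75,
        generator=demo_generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
    ).images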
| 45 | 1 |