from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import (
            LlamaForCausalLM,
            LlamaForSequenceClassification,
            LlamaModel,
            LlamaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
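# A minimal sketch of the lazy-import pattern behind `_LazyModule` above (and the
# identical timm-backbone __init__ further down): attribute access triggers the real
# import on first use. Illustrative only; the actual transformers implementation also
# handles __dir__, pickling, and module specs.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, name):
        if name in self._attr_to_module:
            submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[name]}")
            value = getattr(submodule, name)
            setattr(self, name, value)  # cache so later lookups skip __getattr__
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")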
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown on a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below `limit` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
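# Quick sanity check; the full Project Euler #14 answer, solution(1_000_000), is 837799.
# The `counters` cache of already-measured chain lengths is what keeps the full run fast.
assert solution(10) == 9  # below 10, the longest chain (20 terms) starts at 9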
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
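# With the insertion order above, the demo resolves "de" to every stored completion;
# each suffix carries the trailing " " that _elements appends at the END marker.
assert autocomplete_using_trie("de") == ("depart ", "detergent ", "deer ", "deal ")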
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
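# Example of the suffix rewriting replace_key performs; the key string here is a
# hypothetical codebook tensor name, not taken from a real checkpoint.
assert replace_key("vqvae.bottleneck.level_blocks.0.k") == "vqvae.bottleneck.level_blocks.0.codebook"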
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original Jukebox weights, remap their keys, and save a HF model."""
    for file in MODEL_MAPPING[model_name.split("/")[-1]]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
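# The slicing in read_in_k_v assumes the fused "kv" projection stacks the key rows on
# top of the value rows. A tiny self-contained illustration with arbitrary shapes:
def _kv_split_example(hidden: int = 4) -> None:
    kv_weight = torch.randn(2 * hidden, hidden)  # fused projection: keys over values
    kv_bias = torch.randn(2 * hidden)
    k_weight, v_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]
    k_bias, v_bias = kv_bias[:hidden], kv_bias[hidden:]
    assert k_weight.shape == v_weight.shape == (hidden, hidden)
    assert k_bias.shape == v_bias.shape == (hidden,)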
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        )
    elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
                [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
                [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
            ]
        )
    elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
                [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
                [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
            ]
        )
    elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
                [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
                [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
            ]
        )
    elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
                [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
                [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
            ]
        )
    elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
            [
                [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
                [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
                [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
            ]
        )
    # Cityscapes checkpoints
    elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
                [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
                [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
            ]
        )
    elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
                [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
                [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
            ]
        )
    elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [
                    [-1.1372e01, -1.2787e01, -1.3477e01],
                    [-1.2536e01, -1.4194e01, -1.4409e01],
                    [-1.3217e01, -1.4888e01, -1.5327e01],
                ],
                [
                    [-1.4791e01, -1.7122e01, -1.8277e01],
                    [-1.7163e01, -1.9192e01, -1.9533e01],
                    [-1.7897e01, -1.9991e01, -2.0315e01],
                ],
                [
                    [7.6723e-01, 4.1921e-01, -7.7878e-02],
                    [4.7772e-01, 9.5557e-03, -2.8082e-01],
                    [3.6032e-01, -2.4826e-01, -5.1168e-01],
                ],
            ]
        )
    elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
            [
                [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
                [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
                [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
            ]
        )
    elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        )
    elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
                [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
                [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
            ]
        )
    elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
                [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
                [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
            ]
        )
    elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
                [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
                [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
            ]
        )
    elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
            [
                [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
                [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
                [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
            ]
        )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
_snake_case = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    """Return a 2D Gabor filter kernel of (odd) size `ksize`."""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
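# The kernel above implements g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2))
#                                       * cos(2 * pi * x' / lambd + psi),
# where (x', y') are the pixel offsets rotated by theta. A quick shape check
# (parameters are the same arbitrary ones used in the demo loop):
assert gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape == (11, 11)  # even ksize is bumped to odd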
def capitalize_one_letter_variants(txt: str) -> list[str]:
    """Return every variant of `txt` with exactly one alphabetic character uppercased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
def binary_insertion_sort(collection):
    """Sort `collection` in place, locating each insertion point with binary search."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
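# Non-interactive sanity checks (the values are arbitrary; any comparable items work):
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
assert binary_insertion_sort([]) == []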
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    # The target modules are the T5-style blocks of SpectrogramNotesEncoder
    # (layer[0] = self-attention, layer[1] = gated MLP).
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))

    return model
def load_decoder(weights, model):
    # The target modules follow diffusers' T5FilmDecoder layout
    # (layer[0] = FiLM-conditioned self-attention, layer[1] = cross-attention, layer[2] = FiLM MLP).
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))

    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
_snake_case = parser.parse_args()
main(args)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count (by plain recursion) the ordered sequences from `array` that sum to `target`."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoized top-down with a dp array."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, built bottom-up: dp_array[i] sums dp_array[i - array[j]] over all items."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
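# All three variants count ordered sequences (permutations). For the demo input,
# dp[5] = dp[4] + dp[3] + dp[0] = 5 + 3 + 1 = 9, so each returns 9:
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9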
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class UpperCAmelCase_ :
def __init__( self, __a, __a=13, __a=2, __a=24, __a=16, __a=True, __a=True, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=10, __a=0.02, __a=None, __a=2, __a=2, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : int = max_length
_lowerCAmelCase : str = num_mel_bins
_lowerCAmelCase : Union[str, Any] = is_training
_lowerCAmelCase : List[str] = use_labels
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : List[Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = type_sequence_label_size
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = scope
_lowerCAmelCase : Any = frequency_stride
_lowerCAmelCase : Optional[Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCAmelCase : str = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCAmelCase : Optional[Any] = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCAmelCase : List[str] = frequency_out_dimension * time_out_dimension
_lowerCAmelCase : Any = num_patches + 2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
_lowerCAmelCase : Dict = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Any = self.get_config()
return config, input_values, labels
def snake_case__ ( self):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__a, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ASTModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : List[Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {"input_values": input_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a, __a, __a):
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = ASTModelTester(self)
_lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, has_text_modality=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(__a)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
_lowerCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a, nn.Linear))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = model_class(__a)
_lowerCAmelCase : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : int = ["input_values"]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : str = ASTModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = torchaudio.load(_lowerCamelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
if is_torchaudio_available()
else None
)
@slow
def snake_case__ ( self):
'''simple docstring'''
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        audio , sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8_760, -7.0_042, -8.6_602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
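# Minimal standalone sketch (added) of the inference path exercised above. Assumptions:
# Hub access for the MIT/ast-finetuned-audioset-10-10-0.4593 checkpoint, and random
# noise standing in for a real 16 kHz recording.
def _demo_ast_inference():
    import numpy as np
    feature_extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
    model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
    waveform = np.random.randn(16000).astype("float32")  # one second of stand-in audio
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])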
| 658 |
import string
def decrypt( message ):
    '''simple docstring'''
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}" )
def main( ):
    '''simple docstring'''
    message = input("Encrypted message: " )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
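    # Worked example (added check): "HELLO" Caesar-shifted forward by 3 is "KHOOR",
    # so the brute force below should print "HELLO" on the key #3 line.
    decrypt("KHOOR")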
main()
| 658 | 1 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc , x_start , x_end , steps = 100 , ):
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        area += abs(fx2 + fx1 ) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f( x ):
        '''simple docstring'''
        return x**3 + x**2
    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
while i <= 10_0000:
print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
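    # Added sanity note: the exact value is the integral of x^3 + x^2 from -5 to 5,
    # i.e. 250/3 ~= 83.33, which the approximations above converge to as steps grows.
    print(f'''exact value: {250 / 3}''')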
| 658 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats( url = "https://www.worldometers.info/coronavirus" ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    keys = soup.findAll("h1" )
    values = soup.findAll("div" , {"class": "maincounter-number"} )
    keys += soup.findAll("span" , {"class": "panel-title"} )
    values += soup.findAll("div" , {"class": "number-table-main"} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
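    # Offline illustration (added): the same key/value pairing logic applied to a tiny
    # HTML fragment, so the parsing can be checked without hitting the live site.
    sample_html = (
        "<h1>Coronavirus Cases:</h1><div class='maincounter-number'>100</div>"
        "<span class='panel-title'>Deaths</span><div class='number-table-main'>5</div>"
    )
    sample_soup = BeautifulSoup(sample_html, "html.parser")
    sample_keys = sample_soup.findAll("h1") + sample_soup.findAll("span", {"class": "panel-title"})
    sample_values = sample_soup.findAll("div", {"class": "maincounter-number"}) + sample_soup.findAll(
        "div", {"class": "number-table-main"}
    )
    print({k.text.strip(): v.text.strip() for k, v in zip(sample_keys, sample_values)})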
| 658 | 1 |
def combination_sum_iv( n , array , target ):
    '''simple docstring'''
    def count_of_possible_combinations(target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n , array , target ):
    '''simple docstring'''
    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n , array , target ):
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
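    # Added cross-check: the three implementations above count the same thing
    # (ordered ways to compose `target` from `array`), so they must agree; for
    # [1, 2, 5] and target 5 the answer is 9.
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )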
| 658 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__( self, degree, coefficients):
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")
        self.coefficients : list[float] = list(coefficients)
        self.degree = degree
    def __add__( self, polynomial_a):
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__( self, polynomial_a):
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0, [-1])
    def __neg__( self):
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__( self, polynomial_a):
        '''simple docstring'''
        coefficients : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate( self, substitution):
        '''simple docstring'''
        result : int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self):
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__( self):
        '''simple docstring'''
        return self.__str__()
    def derivative( self):
        '''simple docstring'''
        coefficients : list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral( self, constant = 0):
        '''simple docstring'''
        coefficients : list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__( self, polynomial_a):
        '''simple docstring'''
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self, polynomial_a):
        '''simple docstring'''
        return not self.__eq__(polynomial_a)
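# Example usage (added): 3x^2 + 2x + 1, its derivative, and evaluation at x = 2.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # coefficients are ordered from the x^0 term upward
    print(p)                # 3x^2 + 2x + 1
    print(p.derivative())   # 6x + 2
    print(p.evaluate(2))    # 1 + 2*2 + 3*4 = 17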
| 658 | 1 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCAmelCase_ :
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : Dict = None
if self.use_input_mask:
_lowerCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case__ ( self):
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=__a, initializer_range=self.initializer_range, )
def snake_case__ ( self):
'''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case__ ( self, __a, __a, __a, __a, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = BertGenerationEncoder(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Optional[int] = model(__a, attention_mask=__a)
_lowerCAmelCase : str = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Dict = BertGenerationEncoder(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Dict = model(
__a, attention_mask=__a, encoder_hidden_states=__a, encoder_attention_mask=__a, )
_lowerCAmelCase : List[str] = model(
__a, attention_mask=__a, encoder_hidden_states=__a, )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Dict = BertGenerationDecoder(config=__a).to(__a).eval()
# first forward pass
_lowerCAmelCase : List[str] = model(
__a, attention_mask=__a, encoder_hidden_states=__a, encoder_attention_mask=__a, use_cache=__a, )
_lowerCAmelCase : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowerCAmelCase : str = ids_tensor((self.batch_size, 3), config.vocab_size)
_lowerCAmelCase : Tuple = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
_lowerCAmelCase : List[Any] = torch.cat([input_ids, next_tokens], dim=-1)
_lowerCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1)
_lowerCAmelCase : Optional[int] = model(
__a, attention_mask=__a, encoder_hidden_states=__a, encoder_attention_mask=__a, output_hidden_states=__a, )["hidden_states"][0]
_lowerCAmelCase : Optional[int] = model(
__a, attention_mask=__a, encoder_hidden_states=__a, encoder_attention_mask=__a, past_key_values=__a, output_hidden_states=__a, )["hidden_states"][0]
# select random slice
_lowerCAmelCase : int = ids_tensor((1,), output_from_past.shape[-1]).item()
_lowerCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCAmelCase : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a, __a, atol=1E-3))
def snake_case__ ( self, __a, __a, __a, __a, *__a, ):
'''simple docstring'''
_lowerCAmelCase : Dict = BertGenerationDecoder(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Any = model(__a, attention_mask=__a, labels=__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self):
'''simple docstring'''
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , a , unittest.TestCase):
lowerCamelCase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowerCamelCase__ = (BertGenerationDecoder,) if is_torch_available() else ()
lowerCamelCase__ = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
    def snake_case__ ( self):
        '''simple docstring'''
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : Optional[int] = "bert"
self.model_tester.create_and_check_model(__a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a)
def snake_case__ ( self):
'''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
self.assertIsNotNone(__a)
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
_lowerCAmelCase : Union[str, Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
with torch.no_grad():
_lowerCAmelCase : Any = model(__a)[0]
_lowerCAmelCase : Any = torch.Size([1, 8, 1024])
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Dict = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]])
self.assertTrue(torch.allclose(output[:, :3, :3], __a, atol=1E-4))
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
_lowerCAmelCase : List[str] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
with torch.no_grad():
_lowerCAmelCase : List[str] = model(__a)[0]
_lowerCAmelCase : str = torch.Size([1, 8, 5_0358])
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Optional[Any] = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]])
self.assertTrue(torch.allclose(output[:, :3, :3], __a, atol=1E-4))
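# Added sketch of the documented downstream recipe for these classes: pairing the
# encoder and decoder inside an EncoderDecoderModel (assumes Hub access and enough
# memory for the bert-large checkpoints).
def _demo_bert2bert():
    from transformers import EncoderDecoderModel
    # use BERT's [CLS] token (id 101) as BOS and [SEP] token (id 102) as EOS
    encoder = BertGenerationEncoder.from_pretrained("bert-large-uncased", bos_token_id=101, eos_token_id=102)
    decoder = BertGenerationDecoder.from_pretrained(
        "bert-large-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102)
    return EncoderDecoderModel(encoder=encoder, decoder=decoder)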
| 658 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self, vocab_size=3_2000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1E-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.", FutureWarning, )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@property
def snake_case__ ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def snake_case__ ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 1 |
from __future__ import annotations
def prime_sieve( limit ):
    '''simple docstring'''
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution( ceiling = 1_000_000 ):
    '''simple docstring'''
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'''{solution() = }''')
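    # Added spot check: the sieve itself should list [2, 3, 5, 7, 11, 13, 17, 19]
    # for a limit of 20.
    assert prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]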
| 658 |
def price_plus_tax( price , tax_rate ):
    '''simple docstring'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 1 |
from sklearn.metrics import mean_squared_error
import datasets
_snake_case = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_snake_case = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_snake_case = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types()), reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
], )
def snake_case__ ( self):
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float")),
"references": datasets.Sequence(datasets.Value("float")),
}
else:
return {
"predictions": datasets.Value("float"),
"references": datasets.Value("float"),
}
    def snake_case__ ( self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        '''simple docstring'''
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
| 658 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
    def __init__( self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
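# Added usage sketch (assumes a recent `transformers` install providing both configs):
# swapping the default ResNet backbone for a ConvNeXt one.
def _demo_upernet_config():
    from transformers import ConvNextConfig, UperNetConfig
    backbone = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
    cfg = UperNetConfig(backbone_config=backbone)
    print(cfg.to_dict()["backbone_config"]["model_type"])  # convnext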
| 658 | 1 |
from __future__ import annotations
def generate_sum_of_subsets_soln( nums , max_sum ):
    '''simple docstring'''
    result : list[list[int]] = []
    path : list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree( nums , max_sum , num_index , path , result , remaining_nums_sum , ):
    '''simple docstring'''
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
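# Added check: the only subsets of [3, 34, 4, 12, 5, 2] summing to 9 are
# [3, 4, 2] and [4, 5].
assert result == [[3, 4, 2], [4, 5]]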
| 658 |
import base64
def base85_encode( string ):
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8" ) )
def base85_decode( a85encoded ):
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
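    # Added round-trip check: Ascii85 encoding is reversible.
    assert base85_decode(base85_encode("Hello World!")) == "Hello World!"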
| 658 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort( a , start , end ):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition( a , start , end ):
    '''simple docstring'''
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
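# Added sanity check: after the in-place sort, M must be in nondecreasing order.
assert all(M[i] <= M[i + 1] for i in range(len(M) - 1))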
| 658 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
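# Added standalone check (assumes `transformers` ships Data2VecVisionConfig, which this
# class mirrors): the vision defaults carry a 224px image size and 16px patches.
def _demo_data2vec_vision_config():
    from transformers import Data2VecVisionConfig
    cfg = Data2VecVisionConfig()
    print(cfg.image_size, cfg.patch_size)  # 224 16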
| 658 | 1 |
from __future__ import annotations
def encode( plain ):
    '''simple docstring'''
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded ):
    '''simple docstring'''
    return "".join(chr(elem + 96 ) for elem in encoded )
def main( ):
    '''simple docstring'''
    encoded = encode(input("-> " ).strip().lower() )
    print("Encoded: " , encoded )
    print("Decoded:" , decode(encoded ) )
if __name__ == "__main__":
main()
| 658 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    '''simple docstring'''
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {os.path.abspath(pytorch_config_dump_path )}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
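# Example invocation (added; paths are illustrative placeholders):
# python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./xlnet_model.ckpt \
#     --xlnet_config_file ./xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet-dump \
#     --finetuning_task sts-b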
| 658 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt"}
_snake_case = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
_snake_case = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
_snake_case = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ConvBertTokenizer
    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def snake_case__ ( self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def snake_case__ ( self, token_ids_0, token_ids_1 = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def snake_case__ ( self, save_directory, filename_prefix = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
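# Added usage sketch (assumes Hub access to the YituTech checkpoint):
def _demo_convbert_tokenizer():
    from transformers import ConvBertTokenizerFast
    tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
    print(tok("ConvBERT uses span-based dynamic convolution.")["input_ids"])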
| 658 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
    def snake_case__ ( self, predictions, references, max_order=4, smooth=False):
        '''simple docstring'''
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu , precisions , bp , ratio , translation_length , reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
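# Added direct call to the vendored scorer, mirroring the unpacking above: an exact
# match of a four-token sentence (long enough for all n-gram orders) yields BLEU 1.0.
def _demo_compute_bleu():
    references = [[["the", "cat", "sat", "down"]]]
    hypotheses = [["the", "cat", "sat", "down"]]
    bleu, precisions, bp, ratio, translation_length, reference_length = compute_bleu(
        reference_corpus=references, translation_corpus=hypotheses)
    print(bleu)  # 1.0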
| 658 | 1 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
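    # Added worked example for the mean above: (3 + 6 + ... + 21) / 7 = 12.0.
    print(A([3, 6, 9, 12, 15, 18, 21]))  # 12.0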
| 658 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config( config_path , display=False ):
    '''simple docstring'''
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device , conf_path=None , ckpt_path=None ):
    '''simple docstring'''
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd , strict=False )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x , model ):
    '''simple docstring'''
    z , _ , _ = model.encode(x )
    print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string , reload=False ):
    '''simple docstring'''
    module , cls = string.rsplit("." , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config( config ):
    '''simple docstring'''
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate." )
    return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def load_model_from_config( config , sd , gpu=True , eval_mode=True ):
    '''simple docstring'''
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config , ckpt , gpu , eval_mode ):
    '''simple docstring'''
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(F"loaded model from global step {global_step}." )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=gpu , eval_mode=eval_mode )["model"]
    return model, global_step
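# Added end-to-end sketch (assumptions: the taming-transformers package is installed
# and VQGAN checkpoint files exist at the default paths used above):
def _demo_vqgan_roundtrip():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = load_vqgan(device)
    x = torch.randn(1, 3, 256, 256, device=device)  # stand-in image batch
    xrec = reconstruct_with_vqgan(x, model)
    print(x.shape, xrec.shape)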
| 658 | 1 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None ):
    '''simple docstring'''
    assert torch_layer.weight.shape == weight.shape, F"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size ):
    '''simple docstring'''
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size ):
    '''simple docstring'''
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch( weights , torch_block , hidden_size ):
    '''simple docstring'''
    layer_norm_first = weights[0][0][0]
    layer_norm_first_weight = np.asarray(layer_norm_first[0] )
    layer_norm_first_bias = np.asarray(layer_norm_first[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_first_weight ) , torch.tensor(layer_norm_first_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_second_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_second_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_second_weight ) , torch.tensor(layer_norm_second_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch( weights , torch_model , hidden_size ):
    '''simple docstring'''
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = ReformerConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , "rb" ) as f:
        model_weights = pickle.load(f )["weights"]
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
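# Example invocation (added; file names are illustrative placeholders for a trax dump):
# python convert_reformer_trax_checkpoint_to_pytorch.py \
#     --trax_model_pkl_path ./model.pkl \
#     --config_file ./config.json \
#     --pytorch_dump_path ./pytorch_model.bin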
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
    def __init__( self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=2_4858, concat_input=True, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
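# Added standalone check (assumes `transformers` ships RoCBertConfig, which this class
# mirrors): the pronunciation and shape channels carry their own vocab sizes.
def _demo_rocbert_config():
    from transformers import RoCBertConfig
    cfg = RoCBertConfig()
    print(cfg.pronunciation_vocab_size, cfg.shape_vocab_size)  # 910 24858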
| 658 | 1 |
def A ( ):
'''simple docstring'''
return [list(range(1_000 - i , -1_000 - i , -1 ) ) for i in range(1_000 )]
_snake_case = generate_large_matrix()
_snake_case = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid( grid ):
    '''simple docstring'''
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index( array ):
    '''simple docstring'''
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search( grid ):
    '''simple docstring'''
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force( grid ):
    '''simple docstring'''
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break( grid ):
    '''simple docstring'''
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark( ):
    '''simple docstring'''
    from timeit import timeit
    print("Running benchmarks" )
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F"{func}(grid=grid)" , setup=setup , number=500 )
        print(F"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
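# A minimal bound-name sketch of the binary-search counting above (assumes every
# row and column is sorted in non-increasing order, as the grids here are):
def count_negatives(matrix: list[list[int]]) -> int:
    bound = len(matrix[0])
    non_negatives = 0
    for row in matrix:
        # the first negative index can only move left as we walk down the rows,
        # so each binary search is restricted to [0, bound)
        lo, hi = 0, bound
        while lo < hi:
            mid = (lo + hi) // 2
            if row[mid] < 0:
                hi = mid
            else:
                lo = mid + 1
        bound = lo
        non_negatives += bound
    return len(matrix) * len(matrix[0]) - non_negatives
assert count_negatives([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8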
| 658 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
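# A minimal bound-name sketch of the same average with a quick check:
def average(nums: list[float]) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
assert average([2, 4, 6]) == 4.0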
| 658 | 1 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if len(_lowerCamelCase ) == 0:
return []
_lowerCAmelCase , _lowerCAmelCase : List[Any] = min(_lowerCamelCase ), max(_lowerCamelCase )
_lowerCAmelCase : List[str] = int(max_value - min_value ) + 1
_lowerCAmelCase : list[list] = [[] for _ in range(_lowerCamelCase )]
for i in my_list:
buckets[int(i - min_value )].append(_lowerCamelCase )
return [v for bucket in buckets for v in sorted(_lowerCamelCase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
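# The buckets above are one value-unit wide, so floats work too: each value lands
# in bucket int(value - min) and every bucket is sorted individually. A minimal
# bound-name sketch:
def bucket_sort_clean(values: list[float]) -> list[float]:
    if not values:
        return []
    lo, hi = min(values), max(values)
    buckets: list[list[float]] = [[] for _ in range(int(hi - lo) + 1)]
    for v in values:
        buckets[int(v - lo)].append(v)
    return [v for bucket in buckets for v in sorted(bucket)]
assert bucket_sort_clean([0.4, 1.2, 0.2, 1.1]) == [0.2, 0.4, 1.1, 1.2]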
| 658 |
def A ( _lowerCamelCase ):
'''simple docstring'''
if length <= 0 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
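# The n-th hexagonal number is n(2n - 1); indexing starts at n = 0 above, so the
# first entry is 0. A minimal bound-name sketch (isinstance is checked first so a
# non-integer argument raises ValueError rather than TypeError):
def hexagonal(length: int) -> list[int]:
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
assert hexagonal(5) == [0, 1, 6, 15, 28]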
| 658 | 1 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase_ :
def __init__( self, __a, __a=13, __a=64, __a=2, __a=3, __a=True, __a=True, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=10, __a=0.02, __a=[1, 16, 4, 4], __a=None, ):
'''simple docstring'''
_lowerCAmelCase : Any = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Tuple = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : List[str] = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Optional[Any] = num_attention_heads
_lowerCAmelCase : Tuple = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Optional[int] = scope
_lowerCAmelCase : Union[str, Any] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_lowerCAmelCase : List[str] = (self.image_size // 32) ** 2
_lowerCAmelCase : Union[str, Any] = num_patches + 1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : List[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__a, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=__a, )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = ViTHybridModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.type_sequence_label_size
_lowerCAmelCase : Tuple = ViTHybridForImageClassification(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : List[str] = model(__a, labels=__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ViTHybridModelTester(self)
_lowerCAmelCase : int = ConfigTester(self, config_class=__a, has_text_modality=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class(__a)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
_lowerCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a, nn.Linear))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(__a)
_lowerCAmelCase : Dict = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Union[str, Any] = _config_zero_init(__a)
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class(config=__a)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_lowerCAmelCase : List[Any] = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = ViTHybridModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
__a)
_lowerCAmelCase : str = self.default_image_processor
_lowerCAmelCase : str = prepare_img()
_lowerCAmelCase : Dict = image_processor(images=__a, return_tensors="pt").to(__a)
# forward pass
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**__a)
# verify the logits
_lowerCAmelCase : int = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, __a)
_lowerCAmelCase : Any = torch.tensor([-1.9_090, -0.4_993, -0.2_389]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3], __a, atol=1E-4))
@slow
@require_accelerate
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
_lowerCAmelCase : Optional[Any] = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")
_lowerCAmelCase : List[str] = prepare_img()
_lowerCAmelCase : int = image_processor(images=__a, return_tensors="pt")
_lowerCAmelCase : str = model(**__a)
_lowerCAmelCase : Dict = outputs.logits
# model predicts one of the 1000 ImageNet classes
_lowerCAmelCase : str = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
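# A hedged end-to-end sketch mirroring the integration test above; the image path
# is a placeholder and the checkpoint is downloaded on first use:
def run_vit_hybrid_example():
    image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
    model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
    inputs = image_processor(images=Image.open("cat.jpg"), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])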
| 658 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( _lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
# Centralize the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
            # covariance_sum was initialized in a previous iteration (not np.nan)
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
            # covariance_sum was initialized in a previous iteration (not np.nan)
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if features.any():
_lowerCAmelCase : List[Any] = features.mean(1 )
# Center the dataset
_lowerCAmelCase : List[Any] = features - np.reshape(_lowerCamelCase , (data_mean.size, 1) )
_lowerCAmelCase : Optional[Any] = np.dot(_lowerCamelCase , centered_data.T ) / features.shape[1]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = np.linalg.eigh(_lowerCamelCase )
        # Take the columns in reverse order (-1), then keep only the first `dimensions` columns
_lowerCAmelCase : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCAmelCase : List[Any] = np.dot(filtered_eigenvectors.T , _lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
_lowerCAmelCase , _lowerCAmelCase : List[str] = eigh(
covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
_lowerCAmelCase : List[str] = eigenvectors[:, ::-1][:, :dimensions]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = np.linalg.svd(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = svd_matrix[:, 0:dimensions]
_lowerCAmelCase : str = np.dot(filtered_svd_matrix.T , _lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Union[str, Any] = linear_discriminant_analysis(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Tuple = principal_component_analysis(_lowerCamelCase , _lowerCamelCase )
if not np.allclose(_lowerCamelCase , _lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
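# A minimal bound-name PCA sketch matching principal_component_analysis above
# (features hold one observation per column, i.e. shape (n_features, n_samples)):
def pca_sketch(features: np.ndarray, dimensions: int) -> np.ndarray:
    centered = features - features.mean(axis=1, keepdims=True)
    covariance = centered @ centered.T / features.shape[1]
    _, eigenvectors = np.linalg.eigh(covariance)      # eigenvalues in ascending order
    leading = eigenvectors[:, ::-1][:, :dimensions]   # largest components first
    return leading.T @ features
assert pca_sketch(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), 1).shape == (1, 3)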
| 658 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_layoutlmv3": [
"LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv3Config",
"LayoutLMv3OnnxConfig",
],
"processing_layoutlmv3": ["LayoutLMv3Processor"],
"tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["LayoutLMv3FeatureExtractor"]
_snake_case = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
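# A hedged downstream usage sketch; the function-local import reflects how a user
# would trigger the lazy module from their own code:
def _layoutlmv3_usage_sketch():
    from transformers import LayoutLMv3Config
    config = LayoutLMv3Config()
    return config.model_type  # "layoutlmv3"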
| 658 |
import requests
from bs4 import BeautifulSoup
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , "html.parser" )
_lowerCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_lowerCAmelCase : str = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class UpperCAmelCase_ ( unittest.TestCase):
@parameterized.expand([(None,), ("foo.json",)])
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = GenerationConfig(
do_sample=__a, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a, config_name=__a)
_lowerCAmelCase : Any = GenerationConfig.from_pretrained(__a, config_name=__a)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample, __a)
self.assertEqual(loaded_config.temperature, 0.7)
self.assertEqual(loaded_config.length_penalty, 1.0)
self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k, 50)
self.assertEqual(loaded_config.max_length, 20)
self.assertEqual(loaded_config.max_time, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained("gpt2")
_lowerCAmelCase : int = GenerationConfig.from_model_config(__a)
_lowerCAmelCase : int = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__a, __a)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = GenerationConfig()
_lowerCAmelCase : int = {
"max_new_tokens": 1024,
"foo": "bar",
}
_lowerCAmelCase : Dict = copy.deepcopy(__a)
_lowerCAmelCase : List[Any] = generation_config.update(**__a)
# update_kwargs was not modified (no side effects)
self.assertEqual(__a, __a)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens, 1024)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__a, {"foo": "bar"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = GenerationConfig()
_lowerCAmelCase : Tuple = "bar"
with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
generation_config.save_pretrained(__a)
_lowerCAmelCase : Tuple = GenerationConfig.from_pretrained(__a)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo, "bar")
_lowerCAmelCase : List[str] = GenerationConfig.from_model_config(__a)
assert not hasattr(__a, "foo") # no new kwargs should be initialized if from config
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature, 1.0)
self.assertEqual(default_config.do_sample, __a)
self.assertEqual(default_config.num_beams, 1)
_lowerCAmelCase : Any = GenerationConfig(
do_sample=__a, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
self.assertEqual(config.temperature, 0.7)
self.assertEqual(config.do_sample, __a)
self.assertEqual(config.num_beams, 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a)
_lowerCAmelCase : Union[str, Any] = GenerationConfig.from_pretrained(__a, temperature=1.0)
self.assertEqual(loaded_config.temperature, 1.0)
self.assertEqual(loaded_config.do_sample, __a)
self.assertEqual(loaded_config.num_beams, 1) # default value
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase):
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
_lowerCAmelCase : Dict = TOKEN
HfFolder.save_token(__a)
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id="test-generation-config")
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
except HTTPError:
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = GenerationConfig(
do_sample=__a, temperature=0.7, length_penalty=1.0, )
config.push_to_hub("test-generation-config", use_auth_token=self._token)
_lowerCAmelCase : str = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a, getattr(__a, __a))
# Reset repo
delete_repo(token=self._token, repo_id="test-generation-config")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__a, repo_id="test-generation-config", push_to_hub=__a, use_auth_token=self._token)
_lowerCAmelCase : Tuple = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a, getattr(__a, __a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = GenerationConfig(
do_sample=__a, temperature=0.7, length_penalty=1.0, )
config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
_lowerCAmelCase : List[Any] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a, getattr(__a, __a))
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__a, repo_id="valid_org/test-generation-config-org", push_to_hub=__a, use_auth_token=self._token)
_lowerCAmelCase : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a, getattr(__a, __a))
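# A hedged usage sketch of the update() behaviour exercised above:
def generation_config_update_sketch():
    generation_config = GenerationConfig(do_sample=True, temperature=0.7)
    unused = generation_config.update(max_new_tokens=256, foo="bar")
    assert generation_config.max_new_tokens == 256
    assert unused == {"foo": "bar"}  # unknown kwargs are handed back, not applied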
| 658 |
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Tuple = counter
if counter > pre_counter:
_lowerCAmelCase : Union[str, Any] = inputa
_lowerCAmelCase : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
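# A minimal bound-name version of the memoized search above (Project Euler 14):
def longest_collatz_start(limit: int = 1_000_000) -> int:
    lengths = {1: 1}
    best_start, best_length = 1, 1
    for start in range(2, limit):
        n, steps = start, 0
        while n not in lengths:
            n = n // 2 if n % 2 == 0 else 3 * n + 1
            steps += 1
        lengths[start] = steps + lengths[n]
        if lengths[start] > best_length:
            best_start, best_length = start, lengths[start]
    return best_start
assert longest_collatz_start(10) == 9  # 9 starts a 20-term chain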
| 658 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PoolFormerImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
| 658 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            _lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
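# A hedged mini-example of the regex renaming performed above (dots are escaped
# here for strictness; the rewrite mirrors the re_encoder_block_conv_in branch):
import re
pattern = re.compile(r"encoders\.(\d+)\.level_blocks\.(\d+)\.model\.(\d+)\.(\d)\.(bias|weight)")
groups = pattern.fullmatch("encoders.0.level_blocks.1.model.2.1.weight").groups()
block_index = int(groups[2]) * 2 + int(groups[3])
assert (
    f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
    == "encoders.0.level_blocks.1.downsample_block.5.weight"
)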
| 658 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_snake_case = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
_lowerCAmelCase : Tuple = self.transformer_dir
shutil.copy(
os.path.join(__a, "src/transformers/models/bert/modeling_bert.py"), os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = "src/transformers"
shutil.rmtree(self.transformer_dir)
def snake_case__ ( self, __a, __a, __a, __a=None):
'''simple docstring'''
_lowerCAmelCase : int = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
_lowerCAmelCase : int = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
_lowerCAmelCase : List[str] = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119)
_lowerCAmelCase : List[str] = black.format_str(__a, mode=__a)
_lowerCAmelCase : Optional[Any] = os.path.join(self.transformer_dir, "new_code.py")
with open(__a, "w", newline="\n") as f:
f.write(__a)
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__a)) == 0)
else:
check_copies.is_copy_consistent(f.name, overwrite=__a)
with open(__a, "r") as f:
self.assertTrue(f.read(), __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
self.assertEqual(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE + "\n", )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", __a, )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", re.sub("Bert", "TestModel", __a), )
# Copy consistency with a really long name
_lowerCAmelCase : int = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}", f"{long_class_name}LMPredictionHead", re.sub("Bert", __a, __a), )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", __a, overwrite_result=re.sub("Bert", "TestModel", __a), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
_lowerCAmelCase : List[str] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
_lowerCAmelCase : Any = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_lowerCAmelCase : Tuple = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
_lowerCAmelCase , _lowerCAmelCase : List[Any] = check_copies.convert_to_localized_md(
__a, __a, localized_readme["format_model_list"])
self.assertFalse(__a)
self.assertEqual(__a, __a)
_lowerCAmelCase , _lowerCAmelCase : str = check_copies.convert_to_localized_md(
__a, __a, localized_readme["format_model_list"])
        # Check that the number of models matches the original README.md after conversion.
self.assertTrue(__a)
_lowerCAmelCase : int = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
_lowerCAmelCase : Optional[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_lowerCAmelCase : Dict = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_lowerCAmelCase , _lowerCAmelCase : Tuple = check_copies.convert_to_localized_md(
__a, __a, localized_readme["format_model_list"])
# Check if the model link is synchronized.
self.assertEqual(__a, __a)
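# A hedged sketch of the black API the helper above relies on; an
# already-formatted snippet round-trips unchanged:
import black
formatted = black.format_str("def f(x):\n    return x\n", mode=black.Mode(line_length=119))
assert formatted == "def f(x):\n    return x\n"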
| 658 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
_lowerCAmelCase : str = ksize + 1
    _lowerCAmelCase : List[str] = np.zeros((ksize, ksize) , dtype=np.float64 )
# each value
for y in range(_lowerCamelCase ):
for x in range(_lowerCamelCase ):
# distance from center
_lowerCAmelCase : int = x - ksize // 2
_lowerCAmelCase : Dict = y - ksize // 2
# degree to radiant
_lowerCAmelCase : List[Any] = theta / 180 * np.pi
_lowerCAmelCase : int = np.cos(_theta )
_lowerCAmelCase : Optional[int] = np.sin(_theta )
# get kernel x
_lowerCAmelCase : int = cos_theta * px + sin_theta * py
# get kernel y
_lowerCAmelCase : str = -sin_theta * px + cos_theta * py
# fill kernel
_lowerCAmelCase : Union[str, Any] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread("../image_data/lena.jpg")
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
    out += filter2D(gray, CV_8UC3, kernel_aa)
_snake_case = out / out.max() * 255
_snake_case = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
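# A minimal bound-name kernel sketch matching the formula above (pure NumPy):
def gabor_kernel_sketch(ksize, sigma, theta, lambd, gamma, psi):
    if ksize % 2 == 0:
        ksize += 1  # even sizes are bumped to the next odd value
    kernel = np.zeros((ksize, ksize), dtype=np.float64)
    for y in range(ksize):
        for x in range(ksize):
            px, py = x - ksize // 2, y - ksize // 2
            _theta = theta / 180 * np.pi
            _x = np.cos(_theta) * px + np.sin(_theta) * py
            _y = -np.sin(_theta) * px + np.cos(_theta) * py
            kernel[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return kernel
assert gabor_kernel_sketch(10, 8, 0, 10, 0, 0).shape == (11, 11)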
| 658 | 1 |
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = hex_num.strip()
if not hex_num:
raise ValueError("No value was passed to the function" )
_lowerCAmelCase : Optional[Any] = hex_num[0] == "-"
if is_negative:
_lowerCAmelCase : List[str] = hex_num[1:]
try:
_lowerCAmelCase : Tuple = int(_lowerCamelCase , 16 )
except ValueError:
raise ValueError("Invalid value was passed to the function" )
_lowerCAmelCase : List[Any] = ""
while int_num > 0:
_lowerCAmelCase : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("-" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
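# A minimal bound-name version of the conversion above (like the original, the
# "binary" result is returned as a base-10 int made of binary digits):
def hex_to_bin(hex_num: str) -> int:
    stripped = hex_num.strip()
    if not stripped:
        raise ValueError("No value was passed to the function")
    negative = stripped.startswith("-")
    value = int(stripped.lstrip("-"), 16)
    digits = int(bin(value)[2:]) if value else 0
    return -digits if negative else digits
assert hex_to_bin("AC") == 10101100
assert hex_to_bin("-fF") == -11111111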
| 658 |
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
for i in range(1 , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = collection[i]
_lowerCAmelCase : str = 0
_lowerCAmelCase : Union[str, Any] = i - 1
while low <= high:
_lowerCAmelCase : List[str] = (low + high) // 2
if val < collection[mid]:
_lowerCAmelCase : Optional[int] = mid - 1
else:
_lowerCAmelCase : List[str] = mid + 1
for j in range(_lowerCamelCase , _lowerCamelCase , -1 ):
_lowerCAmelCase : int = collection[j - 1]
_lowerCAmelCase : Optional[int] = val
return collection
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
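# A minimal bound-name version of the binary insertion sort above:
def binary_insertion_sort_clean(collection: list) -> list:
    for i in range(1, len(collection)):
        value = collection[i]
        low, high = 0, i - 1
        while low <= high:  # binary search for the insertion point
            mid = (low + high) // 2
            if value < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):  # shift elements right to make room
            collection[j] = collection[j - 1]
        collection[low] = value
    return collection
assert binary_insertion_sort_clean([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]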
| 658 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
_snake_case = False
class UpperCAmelCase_ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : str = "A painting of a squirrel eating a burger "
_lowerCAmelCase : Optional[Any] = torch.manual_seed(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, generator=__a, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__a)
_lowerCAmelCase : List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained(__a)
pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : List[str] = generator.manual_seed(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, generator=__a, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images
assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion", torch_dtype=torch.floataa)
pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Optional[Any] = "A painting of a squirrel eating a burger "
_lowerCAmelCase : List[Any] = torch.manual_seed(0)
_lowerCAmelCase : Union[str, Any] = pipe(
prompt=__a, generator=__a, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
_lowerCAmelCase : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Optional[Any] = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
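# A hedged usage sketch mirroring the fp16 test above (downloads the
# "shi-labs/versatile-diffusion" checkpoint and assumes a CUDA device):
def run_versatile_diffusion_example():
    pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
        "shi-labs/versatile-diffusion", torch_dtype=torch.float16
    ).to("cuda")
    image = pipe("A painting of a squirrel eating a burger", num_inference_steps=50).images[0]
    image.save("squirrel.png")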
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
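# A hedged usage sketch of the equivalent upstream config (assumes a transformers
# release that ships FocalNet):
from transformers import FocalNetConfig
focalnet_config = FocalNetConfig(image_size=224, embed_dim=96, depths=[2, 2, 6, 2])
print(focalnet_config.model_type)   # "focalnet"
print(focalnet_config.stage_names)  # ["stem", "stage1", ..., "stage4"]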
| 658 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a, __a, __a = None, ):
'''simple docstring'''
super().__init__()
self.register_modules(transformer=__a, vae=__a, scheduler=__a)
# create a imagenet -> id dictionary for easier use
_lowerCAmelCase : List[str] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(","):
_lowerCAmelCase : Optional[int] = int(__a)
_lowerCAmelCase : Union[str, Any] = dict(sorted(self.labels.items()))
def snake_case__ ( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
_lowerCAmelCase : int = list(__a)
for l in label:
if l not in self.labels:
raise ValueError(
f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__( self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        '''simple docstring'''
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype, )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
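# Usage sketch (illustrative, not executed here; the repo id is an assumption):
# the class above mirrors diffusers' DiTPipeline, so sampling would look
# roughly like:
# pipe = UpperCAmelCase_.from_pretrained("facebook/DiT-XL-2-256")
# class_ids = pipe.snake_case__(["white shark"])  # label -> ImageNet id (upstream name: get_label_ids)
# images = pipe(class_ids, guidance_scale=4.0, num_inference_steps=25).images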
def combination_sum_iv ( n , array , target ):
    '''simple docstring'''
    def count_of_possible_combinations( target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array ( n , array , target ):
    '''simple docstring'''
    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up ( n , array , target ):
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
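    # Cross-check sketch: the three implementations above must agree. For
    # array = [1, 2, 5] and target = 5 there are 9 ordered combinations
    # (5; 1+2+2 in 3 orders; 1+1+1+2 in 4 orders; 1+1+1+1+1).
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )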
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = SqueezeBertTokenizer
    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self, token_ids_a, token_ids_b=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self, token_ids_a, token_ids_b = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary( self, save_directory, filename_prefix = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
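# Usage sketch (illustrative, not executed; relies on the checkpoint names
# mapped above): loading the fast tokenizer and encoding a sentence pair.
# tok = UpperCAmelCase_.from_pretrained("squeezebert/squeezebert-uncased")
# enc = tok("hello", "world")  # [CLS] ... [SEP] ... [SEP] via the overrides above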
import string
def decrypt ( message ):
    '''simple docstring'''
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}" )
def main ( ):
    '''simple docstring'''
    message = input("Encrypted message: " )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
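    # Non-interactive sketch: "WKLV LV D WHVW" is "THIS IS A TEST" shifted by
    # three, so the brute-force table below prints the plaintext at Key #3.
    decrypt("WKLV LV D WHVW")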
main()
def naive_cut_rod_recursive ( n , prices ):
    '''simple docstring'''
    _enforce_args(n , prices )
    if n == 0:
        return 0
    max_revue = float("-inf" )
    for i in range(1 , n + 1 ):
        max_revue = max(
            max_revue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices ) )
    return max_revue
def top_down_cut_rod ( n , prices ):
    '''simple docstring'''
    _enforce_args(n , prices )
    max_rev = [float("-inf" ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n , prices , max_rev )
def _top_down_cut_rod_recursive ( n , prices , max_rev ):
    '''simple docstring'''
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf" )
        for i in range(1 , n + 1 ):
            max_revenue = max(
                max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev ) , )
        max_rev[n] = max_revenue
    return max_rev[n]
def bottom_up_cut_rod ( n , prices ):
    '''simple docstring'''
    _enforce_args(n , prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf" ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 , n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 , i + 1 ):
            max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args ( n , prices ):
    '''simple docstring'''
    if n < 0:
        msg = F"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            F"Got n = {n} but length of prices = {len(prices )}"
        )
        raise ValueError(msg )
def main ( ):
    '''simple docstring'''
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n , prices )
    max_rev_bottom_up = bottom_up_cut_rod(n , prices )
    max_rev_naive = naive_cut_rod_recursive(n , prices )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
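    # Worked example (sketch): the classic CLRS instance, where prices[i] is
    # the price of a rod of length i + 1; the optimal revenue for n = 10 is 30
    # (sell the rod whole).
    print(bottom_up_cut_rod(10, [1, 5, 8, 9, 10, 17, 17, 20, 24, 30]))  # 30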
import requests
from bs4 import BeautifulSoup
def A ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
_lowerCAmelCase : str = soup.findAll("h1" )
_lowerCAmelCase : Optional[int] = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type ( model_name_or_path ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths ( metric_fn , prediction , ground_truths ):
    '''simple docstring'''
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
def get_scores ( args , preds_path , gold_data_path ):
    '''simple docstring'''
    hypos = [line.strip() for line in open(preds_path , "r" ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep="\t" , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , "r" ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em = 1_00.0 * em / total
    fa = 1_00.0 * fa / total
    logger.info(F"F1: {fa:.2f}" )
    logger.info(F"EM: {em:.2f}" )
def get_precision_at_k ( args , preds_path , gold_data_path ):
    '''simple docstring'''
    k = args.k
    hypos = [line.strip() for line in open(preds_path , "r" ).readlines()]
    references = [line.strip() for line in open(gold_data_path , "r" ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split("\t" )[:k] )
        ref_provenance = set(reference.split("\t" ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 1_00.0 * em / total
    logger.info(F"Precision@{k}: {em: .2f}" )
def evaluate_batch_retrieval ( args , rag_model , questions ):
    '''simple docstring'''
    def strip_title(title ):
        if title.startswith("\"" ):
            title = title[1:]
        if title.endswith("\"" ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors="pt" , padding=True , truncation=True , )["input_ids"].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance ) )
    return provenance_strings
def evaluate_batch_eae ( args , rag_model , questions ):
    '''simple docstring'''
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors="pt" , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate( # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info("Q: {} - A: {}".format(q , a ) )
        return answers
def get_args ( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=str , help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ) , )
    parser.add_argument(
        "--index_name" , default=None , choices=["exact", "compressed", "legacy"] , type=str , help="RAG model retriever type" , )
    parser.add_argument(
        "--index_path" , default=None , type=str , help="Path to the retrieval index" , )
    parser.add_argument("--n_docs" , default=5 , type=int , help="Number of retrieved docs" )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=str , help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ) , )
    parser.add_argument("--k" , default=1 , type=int , help="k for the precision@k calculation" )
    parser.add_argument(
        "--evaluation_set" , default=None , type=str , required=True , help="Path to a file containing evaluation samples" , )
    parser.add_argument(
        "--gold_data_path" , default=None , type=str , required=True , help="Path to a tab-separated file with gold samples" , )
    parser.add_argument(
        "--gold_data_mode" , default="qa" , type=str , choices=["qa", "ans"] , help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ) , )
    parser.add_argument(
        "--predictions_path" , type=str , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
    parser.add_argument(
        "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
    parser.add_argument(
        "--eval_batch_size" , default=8 , type=int , help="Batch size per GPU/CPU for evaluation." , )
    parser.add_argument(
        "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
    parser.add_argument(
        "--num_beams" , default=4 , type=int , help="Number of beams to be used when generating answers" , )
    parser.add_argument("--min_length" , default=1 , type=int , help="Min length of the generated answers" )
    parser.add_argument("--max_length" , default=50 , type=int , help="Max length of the generated answers" )
    parser.add_argument(
        "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
    parser.add_argument(
        "--print_docs" , action="store_true" , help="If True, prints docs retrieved while generating." , )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    return args
def main ( args ):
    '''simple docstring'''
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith("rag" ):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s" , checkpoints )
    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint ) )
        logger.info("  Batch size = %d" , args.eval_batch_size )
        logger.info("  Predictions will be stored under {}".format(args.predictions_path ) )
        if args.model_type.startswith("rag" ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write("\n".join(answers ) + "\n" )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write("\n".join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
    main(args)
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial :
    def __init__( self, degree, coefficients):
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__( self, polynomial_a):
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__( self, polynomial_a):
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0, [-1])
    def __neg__( self):
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__( self, polynomial_a):
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate( self, substitution):
        '''simple docstring'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self):
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__( self):
        '''simple docstring'''
        return self.__str__()
    def derivative( self):
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral( self, constant = 0):
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__( self, polynomial_a):
        '''simple docstring'''
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self, polynomial_a):
        '''simple docstring'''
        return not self.__eq__(polynomial_a)
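if __name__ == "__main__":
    # Usage sketch for the class above: p(x) = 3x^2 + 2x + 1, with coefficients
    # listed from the constant term upward. Differentiating the integral of p
    # recovers p, which doubles as a quick self-test.
    p = Polynomial(2, [1, 2, 3])
    print(p)              # 3x^2 + 2x + 1
    print(p.evaluate(2))  # 17
    assert p.integral().derivative() == p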
def _modexpt ( base , exponent , modulo_value ):
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution ( base = 1_777 , height = 1_855 , digits = 8 ):
    '''simple docstring'''
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
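    # Sanity sketch: one level of the tetration tower must agree with Python's
    # built-in three-argument pow.
    assert _modexpt(1_777, 1_855, 10**8) == pow(1_777, 1_855, 10**8)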
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=3_2000, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=True, __a="bi", __a=0.02, __a=1E-12, __a=0.1, __a=512, __a=None, __a=True, __a=False, __a=False, __a=-1, __a=False, __a="last", __a=True, __a="tanh", __a=0.1, __a=5, __a=5, __a=5, __a=1, __a=2, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Tuple = n_layer
_lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
_lowerCAmelCase : Optional[int] = d_model // n_head
_lowerCAmelCase : List[str] = ff_activation
_lowerCAmelCase : Tuple = d_inner
_lowerCAmelCase : List[Any] = untie_r
_lowerCAmelCase : List[str] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = dropout
_lowerCAmelCase : Optional[int] = mem_len
_lowerCAmelCase : Union[str, Any] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : List[str] = clamp_len
_lowerCAmelCase : Any = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : Optional[Any] = summary_activation
_lowerCAmelCase : Tuple = summary_last_dropout
_lowerCAmelCase : Union[str, Any] = start_n_top
_lowerCAmelCase : Optional[int] = end_n_top
_lowerCAmelCase : Tuple = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def snake_case__ ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
from math import ceil
def solution ( n = 1_001 ):
    '''simple docstring'''
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
def price_plus_tax ( price , tax_rate ):
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
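    # Quick check: 100 * 1.25 is exactly representable in binary floating
    # point, so the equality below is exact.
    assert price_plus_tax(100, 0.25) == 125.0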
import argparse
from collections import defaultdict
import yaml
_snake_case = "docs/source/en/_toctree.yml"
def clean_model_doc_toc ( model_doc ):
    '''simple docstring'''
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def check_model_doc ( overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
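    # Pure-function sketch (reached only when check_model_doc did not raise):
    # duplicate `local` entries sharing one title collapse to a single entry.
    assert clean_model_doc_toc(
        [{"local": "bert", "title": "BERT"}, {"local": "bert", "title": "BERT"}]
    ) == [{"local": "bert", "title": "BERT"}]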
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'bloom'
lowerCamelCase__ = ['past_key_values']
lowerCamelCase__ = {
'num_hidden_layers': 'n_layer',
'num_attention_heads': 'n_head',
}
def __init__( self, __a=25_0880, __a=64, __a=2, __a=8, __a=1E-5, __a=0.02, __a=True, __a=1, __a=2, __a=False, __a=0.0, __a=0.0, __a=1, __a=False, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Dict = vocab_size
# Backward compatibility with n_embed kwarg
_lowerCAmelCase : Tuple = kwargs.pop("n_embed", __a)
_lowerCAmelCase : List[str] = hidden_size if n_embed is None else n_embed
_lowerCAmelCase : Any = n_layer
_lowerCAmelCase : Dict = n_head
_lowerCAmelCase : List[str] = layer_norm_epsilon
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Optional[int] = use_cache
_lowerCAmelCase : Tuple = pretraining_tp
_lowerCAmelCase : Optional[Any] = apply_residual_connection_post_layernorm
_lowerCAmelCase : Optional[int] = hidden_dropout
_lowerCAmelCase : str = attention_dropout
_lowerCAmelCase : Optional[int] = bos_token_id
_lowerCAmelCase : str = eos_token_id
_lowerCAmelCase : int = slow_but_exact
super().__init__(bos_token_id=__a, eos_token_id=__a, **__a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.12')
def __init__( self, __a, __a = "default", __a = None, __a = False, ):
'''simple docstring'''
super().__init__(__a, task=__a, patching_specs=__a, use_past=__a)
if not getattr(self._config, "pad_token_id", __a):
# TODO: how to do that better?
_lowerCAmelCase : str = 0
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__a, direction="inputs", inverted_values_shape=__a)
_lowerCAmelCase : Optional[int] = {0: "batch", 1: "past_sequence + sequence"}
else:
_lowerCAmelCase : List[str] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def snake_case__ ( self):
'''simple docstring'''
return self._config.n_layer
@property
def snake_case__ ( self):
'''simple docstring'''
return self._config.n_head
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-3
def snake_case__ ( self, __a, __a = -1, __a = -1, __a = False, __a = None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = super(__a, self).generate_dummy_inputs(
__a, batch_size=__a, seq_length=__a, is_pair=__a, framework=__a)
# We need to order the input in the way they appears in the forward()
_lowerCAmelCase : List[Any] = OrderedDict({"input_ids": common_inputs["input_ids"]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
_lowerCAmelCase , _lowerCAmelCase : Dict = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Union[str, Any] = seqlen + 2
_lowerCAmelCase : Any = self._config.hidden_size // self.num_attention_heads
_lowerCAmelCase : Tuple = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
_lowerCAmelCase : str = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
_lowerCAmelCase : Optional[Any] = [
(torch.zeros(__a), torch.zeros(__a)) for _ in range(self.num_layers)
]
_lowerCAmelCase : Dict = common_inputs["attention_mask"]
if self.use_past:
_lowerCAmelCase : List[str] = ordered_inputs["attention_mask"].dtype
_lowerCAmelCase : Optional[Any] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__a, __a, dtype=__a)], dim=1)
return ordered_inputs
@property
def snake_case__ ( self):
'''simple docstring'''
return 13
import base64
def base85_encode ( string ):
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8" ) )
def base85_decode ( a85encoded ):
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
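    # Round-trip sketch: Ascii85 encoding and decoding are mutually inverse.
    assert base85_decode(base85_encode("some text")) == "some text"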
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return params[F"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="attention" ):
'''simple docstring'''
_lowerCAmelCase : Dict = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
_lowerCAmelCase : Tuple = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
_lowerCAmelCase : Optional[int] = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
_lowerCAmelCase : Union[str, Any] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
_lowerCAmelCase : Optional[Any] = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
_lowerCAmelCase : int = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
_lowerCAmelCase : Tuple = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
_lowerCAmelCase : List[str] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
if split_mlp_wi:
_lowerCAmelCase : List[str] = params[F"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
_lowerCAmelCase : int = params[F"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
_lowerCAmelCase : Dict = (wi_a, wi_a)
else:
_lowerCAmelCase : List[Any] = params[F"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
_lowerCAmelCase : Optional[Any] = params[F"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return params[F"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def A ( _lowerCamelCase , *, _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False ):
'''simple docstring'''
_lowerCAmelCase : List[str] = traverse_util.flatten_dict(variables["target"] )
_lowerCAmelCase : List[str] = {"/".join(_lowerCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_lowerCAmelCase : int = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , _lowerCamelCase )
_lowerCAmelCase : List[str] = collections.OrderedDict()
# Shared embeddings.
_lowerCAmelCase : Dict = old["token_embedder/embedding"]
# Encoder.
for i in range(_lowerCamelCase ):
# Block i, layer 0 (Self Attention).
_lowerCAmelCase : int = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , "encoder" , "pre_attention_layer_norm" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = tax_attention_lookup(_lowerCamelCase , _lowerCamelCase , "encoder" , "attention" )
_lowerCAmelCase : Union[str, Any] = layer_norm
_lowerCAmelCase : Optional[Any] = k.T
_lowerCAmelCase : Tuple = o.T
_lowerCAmelCase : Tuple = q.T
_lowerCAmelCase : int = v.T
# Block i, layer 1 (MLP).
_lowerCAmelCase : Any = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , "encoder" , "pre_mlp_layer_norm" )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = tax_mlp_lookup(_lowerCamelCase , _lowerCamelCase , "encoder" , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = wi[0].T
_lowerCAmelCase : str = wi[1].T
else:
_lowerCAmelCase : Optional[int] = wi.T
_lowerCAmelCase : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_lowerCAmelCase : Any = tax_relpos_bias_lookup(
_lowerCamelCase , _lowerCamelCase , "encoder" ).T
_lowerCAmelCase : Optional[int] = old["encoder/encoder_norm/scale"]
if not scalable_attention:
_lowerCAmelCase : Any = tax_relpos_bias_lookup(
_lowerCamelCase , 0 , "encoder" ).T
_lowerCAmelCase : int = tax_relpos_bias_lookup(
_lowerCamelCase , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(_lowerCamelCase ):
# Block i, layer 0 (Self Attention).
_lowerCAmelCase : Dict = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" , "pre_self_attention_layer_norm" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = tax_attention_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" , "self_attention" )
_lowerCAmelCase : List[str] = layer_norm
_lowerCAmelCase : List[str] = k.T
_lowerCAmelCase : Tuple = o.T
_lowerCAmelCase : Optional[Any] = q.T
_lowerCAmelCase : Any = v.T
# Block i, layer 1 (Cross Attention).
_lowerCAmelCase : List[str] = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" , "pre_cross_attention_layer_norm" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = tax_attention_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" , "encoder_decoder_attention" )
_lowerCAmelCase : Any = layer_norm
_lowerCAmelCase : List[str] = k.T
_lowerCAmelCase : Dict = o.T
_lowerCAmelCase : Dict = q.T
_lowerCAmelCase : str = v.T
# Block i, layer 2 (MLP).
_lowerCAmelCase : Optional[int] = tax_layer_norm_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" , "pre_mlp_layer_norm" )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = tax_mlp_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" , _lowerCamelCase )
_lowerCAmelCase : str = layer_norm
if split_mlp_wi:
_lowerCAmelCase : List[Any] = wi[0].T
_lowerCAmelCase : int = wi[1].T
else:
_lowerCAmelCase : Any = wi.T
_lowerCAmelCase : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_lowerCAmelCase : int = tax_relpos_bias_lookup(_lowerCamelCase , _lowerCamelCase , "decoder" ).T
_lowerCAmelCase : Optional[Any] = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_lowerCAmelCase : List[str] = old["decoder/logits_dense/kernel"].T
return new
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_lowerCAmelCase : str = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_lowerCAmelCase : Optional[Any] = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
_lowerCAmelCase : Tuple = state_dict["shared.weight"]
return state_dict
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = checkpoints.load_tax_checkpoint(_lowerCamelCase )
_lowerCAmelCase : int = convert_tax_to_pytorch(
_lowerCamelCase , num_layers=config.num_layers , is_encoder_only=_lowerCamelCase , scalable_attention=_lowerCamelCase )
_lowerCAmelCase : Any = make_state_dict(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = False , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = MTaConfig.from_json_file(_lowerCamelCase )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_lowerCAmelCase : List[Any] = UMTaEncoderModel(_lowerCamelCase )
else:
_lowerCAmelCase : Dict = UMTaForConditionalGeneration(_lowerCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_lowerCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(_lowerCamelCase )
print("Done" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
_snake_case = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
from random import shuffle
import tensorflow as tf
from numpy import array
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Dict = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : List[str] = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : str = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : Any = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : Dict = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Dict = tf.placeholder("float64" , [dim] )
_lowerCAmelCase : Union[str, Any] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase , _lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Union[str, Any] = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : str = tf.placeholder("int32" )
_lowerCAmelCase : int = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase , _lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Tuple = tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Optional[int] = tf.placeholder("float" , [dim] )
_lowerCAmelCase : Tuple = tf.placeholder("float" , [dim] )
_lowerCAmelCase : str = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase , _lowerCamelCase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Optional[int] = tf.placeholder("float" , [noofclusters] )
_lowerCAmelCase : List[str] = tf.argmin(_lowerCamelCase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Dict = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Union[str, Any] = [
sess.run(_lowerCamelCase , feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : str = sess.run(
_lowerCamelCase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : Optional[int] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[Any] = sess.run(
_lowerCamelCase , feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : str = sess.run(_lowerCamelCase )
_lowerCAmelCase : int = sess.run(_lowerCamelCase )
return centroids, assignments
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = XLNetConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : Any = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : Any = finetuning_task
_lowerCAmelCase : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Union[str, Any] = XLNetForSequenceClassification(_lowerCamelCase )
elif "squad" in finetuning_task:
_lowerCAmelCase : Union[str, Any] = finetuning_task
_lowerCAmelCase : Any = XLNetForQuestionAnswering(_lowerCamelCase )
else:
_lowerCAmelCase : Union[str, Any] = XLNetLMHeadModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(F"Save PyTorch model to {os.path.abspath(_lowerCamelCase )}" )
torch.save(model.state_dict() , _lowerCamelCase )
print(F"Save configuration file to {os.path.abspath(_lowerCamelCase )}" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
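# Hypothetical invocation (the script filename and all paths are placeholders,
# not taken from this file):
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-24_H-1024_A-16/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sts-b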
| 658 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = UnCLIPImageVariationPipeline
lowerCamelCase__ = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowerCamelCase__ = IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase__ = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowerCamelCase__ = False
@property
def snake_case__ ( self):
'''simple docstring'''
return 32
@property
def snake_case__ ( self):
'''simple docstring'''
return 32
@property
def snake_case__ ( self):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case__ ( self):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case__ ( self):
'''simple docstring'''
return 100
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModelWithProjection(__a)
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : str = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, )
return CLIPVisionModelWithProjection(__a)
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Union[str, Any] = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
_lowerCAmelCase : Union[str, Any] = UnCLIPTextProjModel(**__a)
return model
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Optional[int] = {
"sample_size": 32,
# RGB in channels
"in_channels": 3,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
_lowerCAmelCase : Tuple = UNetaDConditionModel(**__a)
return model
@property
def snake_case__ ( self):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : str = UNetaDModel(**self.dummy_super_res_kwargs)
return model
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(1)
_lowerCAmelCase : Union[str, Any] = UNetaDModel(**self.dummy_super_res_kwargs)
return model
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.dummy_decoder
_lowerCAmelCase : int = self.dummy_text_proj
_lowerCAmelCase : Tuple = self.dummy_text_encoder
_lowerCAmelCase : str = self.dummy_tokenizer
_lowerCAmelCase : Optional[Any] = self.dummy_super_res_first
_lowerCAmelCase : List[Any] = self.dummy_super_res_last
_lowerCAmelCase : Tuple = UnCLIPScheduler(
variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000, )
_lowerCAmelCase : Any = UnCLIPScheduler(
variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000, )
_lowerCAmelCase : Dict = CLIPImageProcessor(crop_size=32, size=32)
_lowerCAmelCase : str = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def snake_case__ ( self, __a, __a=0, __a=True):
'''simple docstring'''
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 32, 32), rng=random.Random(__a)).to(__a)
if str(__a).startswith("mps"):
_lowerCAmelCase : List[Any] = torch.manual_seed(__a)
else:
_lowerCAmelCase : Dict = torch.Generator(device=__a).manual_seed(__a)
if pil_image:
_lowerCAmelCase : Any = input_image * 0.5 + 0.5
_lowerCAmelCase : int = input_image.clamp(0, 1)
_lowerCAmelCase : Optional[Any] = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
_lowerCAmelCase : Dict = DiffusionPipeline.numpy_to_pil(__a)[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "cpu"
_lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**__a)
_lowerCAmelCase : int = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(__a, pil_image=__a)
_lowerCAmelCase : List[str] = pipe(**__a)
_lowerCAmelCase : Optional[int] = output.images
_lowerCAmelCase : str = self.get_dummy_inputs(__a, pil_image=__a)
_lowerCAmelCase : Optional[int] = pipe(
**__a, return_dict=__a, )[0]
_lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_lowerCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : str = np.array(
[
0.9_997,
0.0_002,
0.9_997,
0.9_997,
0.9_969,
0.0_023,
0.9_997,
0.9_969,
0.9_970,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "cpu"
_lowerCAmelCase : Dict = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**__a)
_lowerCAmelCase : Tuple = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(__a, pil_image=__a)
_lowerCAmelCase : Optional[int] = pipe(**__a)
_lowerCAmelCase : Any = output.images
_lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(__a, pil_image=__a)
_lowerCAmelCase : Optional[Any] = pipe(
**__a, return_dict=__a, )[0]
_lowerCAmelCase : str = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Any = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = "cpu"
_lowerCAmelCase : List[str] = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**__a)
_lowerCAmelCase : List[str] = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : int = self.get_dummy_inputs(__a, pil_image=__a)
_lowerCAmelCase : List[str] = [
pipeline_inputs["image"],
pipeline_inputs["image"],
]
_lowerCAmelCase : Union[str, Any] = pipe(**__a)
_lowerCAmelCase : Optional[Any] = output.images
_lowerCAmelCase : Any = self.get_dummy_inputs(__a, pil_image=__a)
_lowerCAmelCase : Tuple = [
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
_lowerCAmelCase : Dict = pipe(
**__a, return_dict=__a, )[0]
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
_lowerCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
_lowerCAmelCase : Dict = np.array(
[
0.9_997,
0.9_989,
0.0_008,
0.0_021,
0.9_960,
0.0_018,
0.0_014,
0.0_002,
0.9_933,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.device("cpu")
class UpperCAmelCase_ :
lowerCamelCase__ = 1
_lowerCAmelCase : Optional[int] = self.get_dummy_components()
_lowerCAmelCase : Union[str, Any] = self.pipeline_class(**__a)
_lowerCAmelCase : Dict = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = torch.Generator(device=__a).manual_seed(0)
_lowerCAmelCase : List[str] = pipe.decoder.dtype
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : List[Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
_lowerCAmelCase : List[Any] = pipe.prepare_latents(
__a, dtype=__a, device=__a, generator=__a, latents=__a, scheduler=DummyScheduler())
_lowerCAmelCase : Union[str, Any] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
_lowerCAmelCase : int = pipe.prepare_latents(
__a, dtype=__a, device=__a, generator=__a, latents=__a, scheduler=DummyScheduler())
_lowerCAmelCase : str = self.get_dummy_inputs(__a, pil_image=__a)
_lowerCAmelCase : Dict = pipe(
**__a, decoder_latents=__a, super_res_latents=__a).images
_lowerCAmelCase : int = self.get_dummy_inputs(__a, pil_image=__a)
# Don't pass image, instead pass embedding
_lowerCAmelCase : Union[str, Any] = pipeline_inputs.pop("image")
_lowerCAmelCase : Dict = pipe.image_encoder(__a).image_embeds
_lowerCAmelCase : str = pipe(
**__a, decoder_latents=__a, super_res_latents=__a, image_embeddings=__a, ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a).max() < 1E-4
@skip_mps
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = torch_device == "cpu"
        # Check is relaxed because there is no torch 2.0 sliced-attention added-kv processor
_lowerCAmelCase : Optional[int] = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__a, expected_max_diff=__a)
@skip_mps
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = torch_device == "cpu"
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : List[str] = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
test_max_difference=__a, relax_max_difference=__a, additional_params_copy_to_batched_inputs=__a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
_lowerCAmelCase : Optional[int] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__a, additional_params_copy_to_batched_inputs=__a, )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__a)
@skip_mps
def snake_case__ ( self):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def snake_case__ ( self):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def snake_case__ ( self):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png")
_lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy")
_lowerCAmelCase : Any = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.floataa)
_lowerCAmelCase : Dict = pipeline.to(__a)
pipeline.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Dict = torch.Generator(device="cpu").manual_seed(0)
_lowerCAmelCase : Tuple = pipeline(
__a, generator=__a, output_type="np", )
_lowerCAmelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__a, __a, 15)
| 658 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
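# A rough pure-Python sketch of what compute_bleu does for one n-gram order and
# the brevity penalty. The helper names are illustrative assumptions, not the
# actual nmt_bleu implementation.
import math
from collections import Counter

def ngrams(tokens, n):
    return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))

def modified_precision(references, hypothesis, n):
    hyp_counts = ngrams(hypothesis, n)
    # Clip each hypothesis n-gram count by its maximum count over the references.
    max_ref_counts = Counter()
    for ref in references:
        for gram, count in ngrams(ref, n).items():
            max_ref_counts[gram] = max(max_ref_counts[gram], count)
    clipped = sum(min(count, max_ref_counts[gram]) for gram, count in hyp_counts.items())
    return clipped / max(1, sum(hyp_counts.values()))

def brevity_penalty(reference_length, translation_length):
    # BP = 1 if the translation is longer than the reference, else exp(1 - r/c).
    if translation_length > reference_length:
        return 1.0
    return math.exp(1 - reference_length / max(1, translation_length))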
| 658 | 1 |
def A ( _lowerCamelCase ):
'''simple docstring'''
if len(_lowerCamelCase ) <= 1:
return [tuple(_lowerCamelCase )]
_lowerCAmelCase : Optional[Any] = []
def generate(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : List[str] = [0] * n
res.append(tuple(_lowerCamelCase ) )
_lowerCAmelCase : Any = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
_lowerCAmelCase , _lowerCAmelCase : List[str] = arr[i], arr[0]
else:
_lowerCAmelCase , _lowerCAmelCase : List[Any] = arr[i], arr[c[i]]
res.append(tuple(_lowerCamelCase ) )
c[i] += 1
_lowerCAmelCase : Dict = 0
else:
_lowerCAmelCase : Optional[int] = 0
i += 1
generate(len(_lowerCamelCase ) , _lowerCamelCase )
return res
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(heaps(arr))
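# Quick sanity check of the iterative Heap's algorithm against the standard
# library (a throwaway sketch; it assumes the generator above keeps its
# original name `heaps`, just as the __main__ block does):
from itertools import permutations

assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))
# Heap's algorithm emits all n! tuples while performing only one swap per step,
# which is why it is a popular choice for exhaustive permutation search.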
| 658 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = OmegaConf.load(_lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) )
return config
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if conf_path is None:
_lowerCAmelCase : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml"
_lowerCAmelCase : Tuple = load_config(_lowerCamelCase , display=_lowerCamelCase )
_lowerCAmelCase : str = VQModel(**config.model.params )
if ckpt_path is None:
_lowerCAmelCase : Optional[int] = "./model_checkpoints/vqgan_only.pt"
_lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
if ".ckpt" in ckpt_path:
_lowerCAmelCase : List[Any] = sd["state_dict"]
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
model.to(_lowerCamelCase )
del sd
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = model.encode(_lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
_lowerCAmelCase : int = model.decode(_lowerCamelCase )
return xrec
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = string.rsplit("." , 1 )
if reload:
_lowerCAmelCase : Dict = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def A ( _lowerCamelCase ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : str = instantiate_from_config(_lowerCamelCase )
if sd is not None:
model.load_state_dict(_lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if ckpt:
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : int = pl_sd["global_step"]
print(F"loaded model from global step {global_step}." )
else:
_lowerCAmelCase : Optional[int] = {"state_dict": None}
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"]
return model, global_step
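# End-to-end usage sketch (comment only; the device handling, preprocessing
# step, and argument order are assumptions, and the helpers are referred to by
# the original names they were derived from):
# model = load_vqgan(torch.device("cuda"), conf_path=None, ckpt_path=None)  # falls back to ./model_checkpoints/*
# xrec = reconstruct_with_vqgan(preprocessed_image.to("cuda"), model)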
| 658 | 1 |
import unittest
from transformers import DonutProcessor
_snake_case = "naver-clova-ix/donut-base"
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = DonutProcessor.from_pretrained(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
_lowerCAmelCase : Optional[Any] = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
_lowerCAmelCase : Dict = self.processor.tokenajson(__a)
self.assertDictEqual(__a, __a)
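# For intuition, a heavily reduced sketch of the tag-to-JSON conversion that
# token2json (tokenajson above) performs. This toy regex version is an
# assumption for illustration only: it handles flat, well-formed tags and
# ignores the nested <s_nicknames>/<sep/> structure the real processor parses.
import re

def toy_tokenajson(tokens: str) -> dict:
    return dict(re.findall(r"<s_(.+?)>(.+?)</s_\1>", tokens))

# toy_tokenajson("<s_name>John Doe</s_name><s_age>99</s_age>")
# -> {"name": "John Doe", "age": "99"}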
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
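# Minimal instantiation sketch (parameter values invented for the example; the
# pronunciation/shape vocabularies are what distinguish this config from BERT):
# config = RoCBertConfig()                                     # all defaults above
# small = RoCBertConfig(hidden_size=256, num_hidden_layers=4)  # a smaller variant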
| 658 | 1 |
from __future__ import annotations
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if b == 0:
return (1, 0)
((_lowerCAmelCase) , (_lowerCAmelCase)) : Any = extended_euclid(_lowerCamelCase , a % b )
_lowerCAmelCase : List[str] = a // b
return (y, x - k * y)
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
((_lowerCAmelCase) , (_lowerCAmelCase)) : Tuple = extended_euclid(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[Any] = na * na
_lowerCAmelCase : int = ra * x * na + ra * y * na
return (n % m + m) % m
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
((_lowerCAmelCase) , (_lowerCAmelCase)) : List[Any] = extended_euclid(_lowerCamelCase , _lowerCamelCase )
if b < 0:
_lowerCAmelCase : Dict = (b % n + n) % n
return b
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = invert_modulo(_lowerCamelCase , _lowerCamelCase ), invert_modulo(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = na * na
_lowerCAmelCase : Dict = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 658 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = 0
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) - 1
while left <= right:
        # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
_lowerCAmelCase : List[str] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(_lowerCamelCase ):
return None
_lowerCAmelCase : Tuple = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
_lowerCAmelCase : Tuple = left
_lowerCAmelCase : Tuple = point
elif point > right:
_lowerCAmelCase : Optional[Any] = right
_lowerCAmelCase : Optional[int] = point
else:
if item < current_item:
_lowerCAmelCase : Optional[Any] = point - 1
else:
_lowerCAmelCase : List[Any] = point + 1
return None
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
_lowerCAmelCase : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(_lowerCamelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
elif point > right:
return interpolation_search_by_recursion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , point - 1 )
else:
return interpolation_search_by_recursion(
_lowerCamelCase , _lowerCamelCase , point + 1 , _lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
if collection != sorted(_lowerCamelCase ):
raise ValueError("Collection must be ascending sorted" )
return True
if __name__ == "__main__":
import sys
_snake_case = 0
if debug == 1:
_snake_case = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
_snake_case = 67
_snake_case = interpolation_search(collection, target)
if result is not None:
print(f'''{target} found at positions: {result}''')
else:
print("Not found")
| 658 |
def A ( _lowerCamelCase ):
'''simple docstring'''
if length <= 0 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 1 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 ):
'''simple docstring'''
if name is None:
_lowerCAmelCase : List[Any] = None
else:
_lowerCAmelCase : List[Any] = "." * max(0 , spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}"
_lowerCAmelCase : Optional[Any] = fmt.format(_lowerCamelCase )
# Print and recurse (if needed).
if isinstance(_lowerCamelCase , _lowerCamelCase ):
if msg is not None:
print(_lowerCamelCase )
for k in val.keys():
recursive_print(_lowerCamelCase , val[k] , spaces + 2 )
elif isinstance(_lowerCamelCase , torch.Tensor ):
print(_lowerCamelCase , ":" , val.size() )
else:
print(_lowerCamelCase , ":" , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_lowerCAmelCase : List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
_lowerCAmelCase : List[Any] = param.view(*_lowerCamelCase )
_lowerCAmelCase : Tuple = param.transpose(0 , 2 )
_lowerCAmelCase : Optional[int] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_lowerCAmelCase : Any = (num_heads, num_splits, hidden_size) + input_shape[1:]
_lowerCAmelCase : Any = param.view(*_lowerCamelCase )
_lowerCAmelCase : Any = param.transpose(0 , 1 ).contiguous()
_lowerCAmelCase : Dict = param.view(*_lowerCamelCase )
return param
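# Shape sanity check for the reordering above (sizes invented for the sketch;
# the helper is referred to by its original name, as the call sites below do):
# with 2 heads of size 4 and num_splits=3 (fused q/k/v), a checkpoint-v2.0
# weight stored as [num_heads * num_splits * hidden_size, in_dim] comes back
# as [num_splits * num_heads * hidden_size, in_dim], the fused c_attn layout
# GPT-2 expects.
_demo_param = torch.arange(2 * 3 * 4 * 5, dtype=torch.float32).view(2 * 3 * 4, 5)
assert fix_query_key_value_ordering(_demo_param, 2.0, 3, 2, 4).shape == (2 * 3 * 4, 5)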
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
# old versions did not store training args
_lowerCAmelCase : str = input_state_dict.get("args" , _lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_lowerCAmelCase : int = ds_args.padded_vocab_size
_lowerCAmelCase : int = ds_args.max_position_embeddings
_lowerCAmelCase : Optional[int] = ds_args.hidden_size
_lowerCAmelCase : Dict = ds_args.num_layers
_lowerCAmelCase : Dict = ds_args.num_attention_heads
_lowerCAmelCase : Union[str, Any] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_lowerCAmelCase : Any = config.n_head
# The hidden_size per head.
_lowerCAmelCase : Dict = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_lowerCAmelCase : Union[str, Any] = input_state_dict["checkpoint_version"]
else:
_lowerCAmelCase : List[str] = 0.0
# The model.
_lowerCAmelCase : Dict = input_state_dict["model"]
# The language model.
_lowerCAmelCase : Optional[Any] = model["language_model"]
# The embeddings.
_lowerCAmelCase : Tuple = lm["embedding"]
# The word embeddings.
_lowerCAmelCase : Any = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
_lowerCAmelCase : Any = word_embeddings[: config.vocab_size, :]
_lowerCAmelCase : Optional[int] = word_embeddings
# The position embeddings.
_lowerCAmelCase : Optional[int] = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_lowerCAmelCase : Optional[int] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
_lowerCAmelCase : List[str] = pos_embeddings
# The transformer.
_lowerCAmelCase : Optional[int] = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
_lowerCAmelCase : Optional[int] = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
_lowerCAmelCase : Dict = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_lowerCAmelCase : str = layer_re.match(_lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_lowerCAmelCase : Any = int(m.group(1 ) )
# The name of the operation.
_lowerCAmelCase : Union[str, Any] = m.group(2 )
# Is it a weight or a bias?
_lowerCAmelCase : Tuple = m.group(3 )
# The name of the layer.
_lowerCAmelCase : Tuple = F"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
_lowerCAmelCase : List[Any] = "ln_1" if op_name.startswith("input" ) else "ln_2"
_lowerCAmelCase : Any = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_lowerCAmelCase : int = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Tuple = causal_mask
# Insert a "dummy" tensor for masked_bias.
_lowerCAmelCase : Union[str, Any] = torch.tensor(-1e4 , dtype=torch.floataa )
_lowerCAmelCase : Dict = masked_bias
_lowerCAmelCase : List[str] = fix_query_key_value_ordering(_lowerCamelCase , _lowerCamelCase , 3 , _lowerCamelCase , _lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_lowerCAmelCase : int = out_val.transpose(0 , 1 ).contiguous()
# Store.
_lowerCAmelCase : Any = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_lowerCAmelCase : str = fix_query_key_value_ordering(_lowerCamelCase , _lowerCamelCase , 3 , _lowerCamelCase , _lowerCamelCase )
# Store. No change of shape.
_lowerCAmelCase : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_lowerCAmelCase : Any = megatron_to_transformers[op_name]
_lowerCAmelCase : Optional[int] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
_lowerCAmelCase : Tuple = megatron_to_transformers[op_name]
_lowerCAmelCase : Any = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_lowerCAmelCase : str = transformer["final_layernorm.weight"]
_lowerCAmelCase : List[Any] = transformer["final_layernorm.bias"]
# For LM head, transformers' wants the matrix to weight embeddings.
_lowerCAmelCase : Any = word_embeddings
# It should be done!
return output_state_dict
def A ( ):
'''simple docstring'''
_lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=_lowerCamelCase , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=_lowerCamelCase , help="An optional config json file describing the pre-trained model." , )
_lowerCAmelCase : Dict = parser.parse_args()
# Extract the basename.
_lowerCAmelCase : Any = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
_lowerCAmelCase : Union[str, Any] = torch.load(_lowerCamelCase , map_location="cpu" )
else:
_lowerCAmelCase : Dict = torch.load(args.path_to_checkpoint , map_location="cpu" )
_lowerCAmelCase : Dict = input_state_dict.get("args" , _lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_lowerCAmelCase : Tuple = "gelu_fast"
elif ds_args.openai_gelu:
_lowerCAmelCase : Tuple = "gelu_new"
else:
_lowerCAmelCase : str = "gelu"
else:
# in the very early days this used to be "gelu_new"
_lowerCAmelCase : List[Any] = "gelu_new"
# Spell out all parameters in case the defaults change.
_lowerCAmelCase : Tuple = GPTaConfig(
vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=_lowerCamelCase , summary_activation=_lowerCamelCase , summary_proj_to_labels=_lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=_lowerCamelCase , use_cache=_lowerCamelCase , bos_token_id=50_256 , eos_token_id=50_256 , )
else:
_lowerCAmelCase : Tuple = GPTaConfig.from_json_file(args.config_file )
_lowerCAmelCase : str = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
_lowerCAmelCase : Any = convert_megatron_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_lowerCamelCase , _lowerCamelCase )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
_lowerCAmelCase : int = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_lowerCAmelCase : Optional[Any] = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
_lowerCAmelCase : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
else:
_lowerCAmelCase : Union[str, Any] = "gpt2"
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[Any] = type(_lowerCamelCase ).__name__
_lowerCAmelCase : Any = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(_lowerCamelCase )
# Save tokenizer based on args
print(F"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(_lowerCamelCase )
# Store the state_dict to file.
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , "pytorch_model.bin" )
print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(_lowerCamelCase , _lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
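# Hypothetical invocation, following the script name given in the header notes
# (the checkpoint path is a placeholder):
#
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron_gpt2_345m_v0.0.zip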
| 658 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( _lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
# Centralize the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if features.any():
_lowerCAmelCase : List[Any] = features.mean(1 )
# Center the dataset
_lowerCAmelCase : List[Any] = features - np.reshape(_lowerCamelCase , (data_mean.size, 1) )
_lowerCAmelCase : Optional[Any] = np.dot(_lowerCamelCase , centered_data.T ) / features.shape[1]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = np.linalg.eigh(_lowerCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCAmelCase : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCAmelCase : List[Any] = np.dot(filtered_eigenvectors.T , _lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert classes > dimensions
    # Check if features have already been loaded
    if features.any():
_lowerCAmelCase , _lowerCAmelCase : List[str] = eigh(
covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
_lowerCAmelCase : List[str] = eigenvectors[:, ::-1][:, :dimensions]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = np.linalg.svd(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = svd_matrix[:, 0:dimensions]
_lowerCAmelCase : str = np.dot(filtered_svd_matrix.T , _lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Union[str, Any] = linear_discriminant_analysis(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Tuple = principal_component_analysis(_lowerCamelCase , _lowerCamelCase )
if not np.allclose(_lowerCamelCase , _lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
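# Minimal usage sketch (synthetic data, written with the original function
# name that the tests above also reference):
# features = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [3.0, 6.0, 9.0]]).T
# projected = principal_component_analysis(features, 1)
# `features` is (dim, n_samples) = (3, 3); `projected` comes back as (1, 3),
# one coordinate per original sample along the top principal axis.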
| 658 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'ClapFeatureExtractor'
lowerCamelCase__ = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__(__a, __a)
def __call__( self, __a=None, __a=None, __a=None, **__a):
'''simple docstring'''
_lowerCAmelCase : int = kwargs.pop("sampling_rate", __a)
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none.")
if text is not None:
_lowerCAmelCase : List[str] = self.tokenizer(__a, return_tensors=__a, **__a)
if audios is not None:
_lowerCAmelCase : Dict = self.feature_extractor(
__a, sampling_rate=__a, return_tensors=__a, **__a)
if text is not None and audios is not None:
_lowerCAmelCase : int = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a), tensor_type=__a)
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
return self.tokenizer.batch_decode(*__a, **__a)
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
return self.tokenizer.decode(*__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer.model_input_names
_lowerCAmelCase : str = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
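# Hedged usage sketch (the model id and sampling rate are assumptions, not
# taken from this file): one call routes text through the tokenizer and raw
# audio through the feature extractor, merging both into a single encoding.
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], audios=[waveform],
#                    sampling_rate=48_000, return_tensors="pt")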
| 658 |
import requests
from bsa import BeautifulSoup
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , "html.parser" )
_lowerCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_lowerCAmelCase : str = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 1 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 10**-10 ):
'''simple docstring'''
_lowerCAmelCase : Any = a
while True:
_lowerCAmelCase : Dict = Decimal(_lowerCamelCase ) - (
Decimal(eval(_lowerCamelCase ) ) / Decimal(eval(str(diff(_lowerCamelCase ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_lowerCamelCase ) ) < precision: # noqa: S307
return float(_lowerCamelCase )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find Square Root of 5
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
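# A dependency-free variant for comparison (an illustrative sketch, not the
# implementation above): pass f and its derivative explicitly instead of
# having sympy differentiate a string expression.
def newton_raphson_callable(f, f_prime, x0, precision=1e-10, max_iter=100):
    x = x0
    for _ in range(max_iter):
        x -= f(x) / f_prime(x)  # classic update: x_{n+1} = x_n - f(x_n) / f'(x_n)
        if abs(f(x)) < precision:
            return x
    return x

# newton_raphson_callable(lambda x: x**2 - 5 * x + 2, lambda x: 2 * x - 5, 0.4)
# converges to the same root (~0.438447) as the string-based demo above.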
| 658 |
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Tuple = counter
if counter > pre_counter:
_lowerCAmelCase : Union[str, Any] = inputa
_lowerCAmelCase : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
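# An equivalent memoized formulation with functools, for comparison with the
# hand-rolled dictionary cache above (a sketch, not part of the solution):
from functools import lru_cache

@lru_cache(maxsize=None)
def collatz_length(n: int) -> int:
    if n == 1:
        return 1
    return 1 + collatz_length(n // 2 if n % 2 == 0 else 3 * n + 1)

# max(range(1, 1_000_000), key=collatz_length) reproduces solution(1_000_000);
# the longest chain below one million has 525 terms (524 steps), well within
# Python's default recursion limit.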
| 658 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
_lowerCAmelCase : str = ksize + 1
_lowerCAmelCase : List[str] = np.zeros((ksize, ksize) , dtype=np.floataa )
    # fill in each kernel value
for y in range(_lowerCamelCase ):
for x in range(_lowerCamelCase ):
# distance from center
_lowerCAmelCase : int = x - ksize // 2
_lowerCAmelCase : Dict = y - ksize // 2
# degree to radiant
_lowerCAmelCase : List[Any] = theta / 180 * np.pi
_lowerCAmelCase : int = np.cos(_theta )
_lowerCAmelCase : Optional[int] = np.sin(_theta )
# get kernel x
_lowerCAmelCase : int = cos_theta * px + sin_theta * py
# get kernel y
_lowerCAmelCase : str = -sin_theta * px + cos_theta * py
# fill kernel
_lowerCAmelCase : Union[str, Any] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
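# The loop above evaluates the standard Gabor function
#   g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2*pi*x'/lambd + psi)
# where (x', y') are the pixel offsets rotated by theta, so sweeping theta in
# the demo below probes edges at six different orientations.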
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread("../image_data/lena.jpg")
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case = out / out.max() * 255
_snake_case = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 658 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shapes
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
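# Standalone illustration (hypothetical key, not a real checkpoint entry) of
# the conv-in rewrite performed above: the two indices in model.<m>.<n> are
# flattened into a single downsample_block index via m * 2 + n.
import re
_example_pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
_example_groups = _example_pattern.fullmatch("encoders.0.level_blocks.1.model.2.1.weight").groups()
assert int(_example_groups[2]) * 2 + int(_example_groups[3]) == 5  # 2 * 2 + 1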
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 1 |
import heapq
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_lowerCamelCase , [-1 * len(_lowerCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
_lowerCAmelCase : Tuple = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_lowerCAmelCase : List[str] = heapq.heappop(_lowerCamelCase )[1][0]
chosen_vertices.add(_lowerCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
_lowerCAmelCase : Union[str, Any] = elem[1][1].index(_lowerCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_lowerCamelCase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
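# Standalone illustration of the max-heap trick used above: heapq is a
# min-heap, so pushing -1 * degree makes the highest-degree vertex pop first.
_degree_heap = [[-3, "a"], [-1, "b"], [-2, "c"]]
heapq.heapify(_degree_heap)
assert heapq.heappop(_degree_heap)[1] == "a"  # the vertex with the most edges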
| 658 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
_lowerCAmelCase : str = ksize + 1
_lowerCAmelCase : List[str] = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(_lowerCamelCase ):
for x in range(_lowerCamelCase ):
# distance from center
_lowerCAmelCase : int = x - ksize // 2
_lowerCAmelCase : Dict = y - ksize // 2
            # degrees to radians
_lowerCAmelCase : List[Any] = theta / 180 * np.pi
_lowerCAmelCase : int = np.cos(_theta )
_lowerCAmelCase : Optional[int] = np.sin(_theta )
# get kernel x
_lowerCAmelCase : int = cos_theta * px + sin_theta * py
# get kernel y
_lowerCAmelCase : str = -sin_theta * px + cos_theta * py
# fill kernel
_lowerCAmelCase : Union[str, Any] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread("../image_data/lena.jpg")
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case = out / out.max() * 255
_snake_case = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
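# Standalone note on the even-size guard above (illustrative): an even mask
# has no single centre pixel, so the original algorithm bumps ksize by one;
# the ksize=10 passed above therefore yields an 11x11 kernel, centre index 5.
_ksize_check = 10
if (_ksize_check % 2) == 0:
    _ksize_check = _ksize_check + 1
assert _ksize_check == 11 and _ksize_check // 2 == 5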
| 658 | 1 |
import requests
from bsa import BeautifulSoup
def A ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
_lowerCAmelCase : str = soup.findAll("h1" )
_lowerCAmelCase : Optional[int] = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
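# Standalone illustration of the pairing above (made-up values): the scraped
# headings and counters are parallel lists zipped into one {title: number} dict.
_demo_keys = ["Coronavirus Cases:", "Deaths:", "Recovered:"]
_demo_values = [" 1,000 ", " 10 ", " 900 "]
assert {k.strip(): v.strip() for k, v in zip(_demo_keys, _demo_values)}["Deaths:"] == "10"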
| 658 |
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
for i in range(1 , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = collection[i]
_lowerCAmelCase : str = 0
_lowerCAmelCase : Union[str, Any] = i - 1
while low <= high:
_lowerCAmelCase : List[str] = (low + high) // 2
if val < collection[mid]:
_lowerCAmelCase : Optional[int] = mid - 1
else:
_lowerCAmelCase : List[str] = mid + 1
for j in range(_lowerCamelCase , _lowerCamelCase , -1 ):
_lowerCAmelCase : int = collection[j - 1]
_lowerCAmelCase : Optional[int] = val
return collection
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
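# Standalone cross-check of the algorithm above: the "binary search for the
# insertion point" idea is what bisect.insort implements, so a reference sort
# built on the standard library must agree with sorted() on any input.
import bisect
import random
_data = [random.randint(-100, 100) for _ in range(50)]
_reference: list = []
for _value in _data:
    bisect.insort(_reference, _value)
assert _reference == sorted(_data)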
| 658 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ :
def __init__( self, __a, __a=2, __a=3, __a=4, __a=2, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=36, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=6, __a=6, __a=3, __a=4, __a=None, __a=1000, ):
'''simple docstring'''
_lowerCAmelCase : int = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Tuple = patch_size
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : str = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : List[str] = type_sequence_label_size
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Dict = coordinate_size
_lowerCAmelCase : Any = shape_size
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : int = num_choices
_lowerCAmelCase : Any = scope
_lowerCAmelCase : str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCAmelCase : int = text_seq_length
_lowerCAmelCase : str = (image_size // patch_size) ** 2 + 1
_lowerCAmelCase : Optional[Any] = self.text_seq_length + self.image_seq_length
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
_lowerCAmelCase : Dict = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : str = bbox[i, j, 3]
_lowerCAmelCase : Tuple = bbox[i, j, 1]
_lowerCAmelCase : Dict = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : List[Any] = bbox[i, j, 2]
_lowerCAmelCase : Any = bbox[i, j, 0]
_lowerCAmelCase : Tuple = tmp_coordinate
_lowerCAmelCase : Dict = tf.constant(__a)
_lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : int = None
if self.use_input_mask:
_lowerCAmelCase : str = random_attention_mask([self.batch_size, self.text_seq_length])
_lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[Any] = None
if self.use_labels:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
_lowerCAmelCase : Optional[Any] = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = TFLayoutLMvaModel(config=__a)
# text + image
_lowerCAmelCase : Tuple = model(__a, pixel_values=__a, training=__a)
_lowerCAmelCase : Optional[Any] = model(
__a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, training=__a, )
_lowerCAmelCase : List[str] = model(__a, bbox=__a, pixel_values=__a, training=__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
# text only
_lowerCAmelCase : int = model(__a, training=__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
_lowerCAmelCase : Any = model({"pixel_values": pixel_values}, training=__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : List[Any] = TFLayoutLMvaForSequenceClassification(config=__a)
_lowerCAmelCase : List[str] = model(
__a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, labels=__a, training=__a, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : str = TFLayoutLMvaForTokenClassification(config=__a)
_lowerCAmelCase : Any = model(
__a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, labels=__a, training=__a, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 2
_lowerCAmelCase : List[str] = TFLayoutLMvaForQuestionAnswering(config=__a)
_lowerCAmelCase : Any = model(
__a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, start_positions=__a, end_positions=__a, training=__a, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Tuple = config_and_inputs
_lowerCAmelCase : Optional[Any] = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a, __a, __a):
'''simple docstring'''
return True
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : int = copy.deepcopy(__a)
if model_class in get_values(__a):
_lowerCAmelCase : int = {
k: tf.tile(tf.expand_dims(__a, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(__a, tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__a):
_lowerCAmelCase : Dict = tf.ones(self.model_tester.batch_size, dtype=tf.intaa)
elif model_class in get_values(__a):
_lowerCAmelCase : List[Any] = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
_lowerCAmelCase : Any = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
elif model_class in get_values(__a):
_lowerCAmelCase : Union[str, Any] = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
elif model_class in get_values(__a):
_lowerCAmelCase : Optional[int] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.intaa)
return inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFLayoutLMvaModelTester(self)
_lowerCAmelCase : Any = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class(__a)
if getattr(__a, "hf_compute_loss", __a):
# The number of elements in the loss should be the same as the number of elements in the label
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
_lowerCAmelCase : Union[str, Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=__a)[0]
]
_lowerCAmelCase : Any = added_label.shape.as_list()[:1]
                # Test that the model correctly computes the loss with kwargs
_lowerCAmelCase : str = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
_lowerCAmelCase : List[str] = prepared_for_class.pop("input_ids")
_lowerCAmelCase : Tuple = model(__a, **__a)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that the model correctly computes the loss when we mask some positions
_lowerCAmelCase : Dict = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
_lowerCAmelCase : Union[str, Any] = prepared_for_class.pop("input_ids")
if "labels" in prepared_for_class:
_lowerCAmelCase : Union[str, Any] = prepared_for_class["labels"].numpy()
if len(labels.shape) > 1 and labels.shape[1] != 1:
_lowerCAmelCase : int = -100
_lowerCAmelCase : Optional[int] = tf.convert_to_tensor(__a)
_lowerCAmelCase : int = model(__a, **__a)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that the model correctly computes the loss with a dict
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
_lowerCAmelCase : int = model(__a)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that the model correctly computes the loss with a tuple
_lowerCAmelCase : Tuple = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
# Get keys that were added with the _prepare_for_class function
_lowerCAmelCase : Any = prepared_for_class.keys() - inputs_dict.keys()
_lowerCAmelCase : List[str] = inspect.signature(model.call).parameters
_lowerCAmelCase : List[Any] = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
_lowerCAmelCase : str = {0: "input_ids"}
for label_key in label_keys:
_lowerCAmelCase : int = signature_names.index(__a)
_lowerCAmelCase : str = label_key
_lowerCAmelCase : Tuple = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
_lowerCAmelCase : Optional[Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
_lowerCAmelCase : Tuple = prepared_for_class[value]
_lowerCAmelCase : Any = tuple(__a)
# Send to model
_lowerCAmelCase : Tuple = model(tuple_input[:-1])[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
def snake_case__ ( self):
'''simple docstring'''
        ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__a, __a, __a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
        ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : Union[str, Any] = type
self.model_tester.create_and_check_model(__a, __a, __a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
        ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__a, __a, __a, __a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
        ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__a, __a, __a, __a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
        ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__a, __a, __a, __a, __a, __a, __a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[str] = TFLayoutLMvaModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__a) if is_vision_available() else None
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
_lowerCAmelCase : str = self.default_image_processor
_lowerCAmelCase : Dict = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=__a, return_tensors="tf").pixel_values
_lowerCAmelCase : Tuple = tf.constant([[1, 2]])
_lowerCAmelCase : Dict = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
# forward pass
_lowerCAmelCase : int = model(input_ids=__a, bbox=__a, pixel_values=__a, training=__a)
# verify the logits
_lowerCAmelCase : List[Any] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape, __a)
_lowerCAmelCase : int = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]])
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], __a, atol=1E-4))
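# Standalone arithmetic check of the sequence-length bookkeeping in the model
# tester above, assuming its positional defaults are image_size=4,
# patch_size=2 and text_seq_length=7 (as in the upstream test):
_image_seq_length = (4 // 2) ** 2 + 1  # patches plus the CLS token
assert _image_seq_length == 5
assert 7 + _image_seq_length == 12  # total seq_length seen by the model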
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
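# Standalone illustration of the stage naming above: four depth entries
# produce a stem plus one named stage per entry.
_demo_depths = [2, 2, 6, 2]
_demo_stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(_demo_depths) + 1)]
assert _demo_stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]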
| 658 | 1 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase_ ( a , a):
@register_to_config
    def __init__( self, *, __a = 4, __a = 768, __a, __a, ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.zeros(__a))
# parameters for additional clip time embeddings
_lowerCAmelCase : Dict = nn.Linear(__a, __a)
_lowerCAmelCase : int = nn.Linear(__a, __a)
# parameters for encoder hidden states
_lowerCAmelCase : List[Any] = clip_extra_context_tokens
_lowerCAmelCase : int = nn.Linear(
__a, self.clip_extra_context_tokens * cross_attention_dim)
_lowerCAmelCase : List[str] = nn.Linear(__a, __a)
_lowerCAmelCase : Dict = nn.LayerNorm(__a)
def snake_case__ ( self, *, __a, __a, __a, __a):
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
_lowerCAmelCase : List[Any] = image_embeddings.shape[0]
_lowerCAmelCase : Optional[Any] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
_lowerCAmelCase : Optional[int] = classifier_free_guidance_embeddings.expand(
__a, -1)
_lowerCAmelCase : List[str] = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
_lowerCAmelCase : Dict = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
_lowerCAmelCase : List[str] = self.embedding_proj(__a)
_lowerCAmelCase : List[str] = self.clip_image_embeddings_project_to_time_embeddings(__a)
_lowerCAmelCase : List[Any] = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
_lowerCAmelCase : Any = self.clip_extra_context_tokens_proj(__a)
_lowerCAmelCase : List[Any] = clip_extra_context_tokens.reshape(__a, -1, self.clip_extra_context_tokens)
_lowerCAmelCase : Optional[int] = clip_extra_context_tokens.permute(0, 2, 1)
_lowerCAmelCase : str = self.encoder_hidden_states_proj(__a)
_lowerCAmelCase : Any = self.text_encoder_hidden_states_norm(__a)
_lowerCAmelCase : Optional[int] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
return text_encoder_hidden_states, additive_clip_time_embeddings
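# Standalone shape sketch (hypothetical sizes) of the classifier-free
# guidance branch above: a learned embedding is broadcast over the batch and
# concatenated in front of the image embeddings along the batch dimension.
_batch, _dim = 2, 768
_image_embeddings = torch.randn(_batch, _dim)
_cfg = torch.zeros(_dim).unsqueeze(0).expand(_batch, -1)
assert torch.cat([_cfg, _image_embeddings], dim=0).shape == (2 * _batch, _dim)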
| 658 |
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def count_of_possible_combinations(_lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_lowerCamelCase , _lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase : Optional[int] = sum(
count_of_possible_combinations_with_dp_array(target - item , _lowerCamelCase )
for item in array )
_lowerCAmelCase : Any = answer
return answer
_lowerCAmelCase : List[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [0] * (target + 1)
_lowerCAmelCase : List[str] = 1
for i in range(1 , target + 1 ):
for j in range(_lowerCamelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
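# Standalone cross-check of the bottom-up recurrence above: with array
# [1, 2, 5] and target 5 there are exactly 9 ordered combinations.
_target, _array = 5, [1, 2, 5]
_dp = [0] * (_target + 1)
_dp[0] = 1
for _i in range(1, _target + 1):
    for _item in _array:
        if _i - _item >= 0:
            _dp[_i] += _dp[_i - _item]
assert _dp[_target] == 9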
| 658 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = args.pruning_method
_lowerCAmelCase : Dict = args.threshold
_lowerCAmelCase : Any = args.model_name_or_path.rstrip("/" )
_lowerCAmelCase : str = args.target_model_path
print(F"Load fine-pruned model from {model_name_or_path}" )
_lowerCAmelCase : Any = torch.load(os.path.join(_lowerCamelCase , "pytorch_model.bin" ) )
_lowerCAmelCase : Tuple = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowerCAmelCase : Optional[Any] = tensor
print(F"Copied layer {name}" )
elif "classifier" in name or "qa_output" in name:
_lowerCAmelCase : Dict = tensor
print(F"Copied layer {name}" )
elif "bias" in name:
_lowerCAmelCase : Any = tensor
print(F"Copied layer {name}" )
else:
if pruning_method == "magnitude":
_lowerCAmelCase : str = MagnitudeBinarizer.apply(inputs=_lowerCamelCase , threshold=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowerCAmelCase : Optional[Any] = name[:-6]
_lowerCAmelCase : List[Any] = model[F"{prefix_}mask_scores"]
_lowerCAmelCase : int = TopKBinarizer.apply(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowerCAmelCase : int = name[:-6]
_lowerCAmelCase : Optional[Any] = model[F"{prefix_}mask_scores"]
_lowerCAmelCase : str = ThresholdBinarizer.apply(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Tuple = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowerCAmelCase : int = name[:-6]
_lowerCAmelCase : Optional[int] = model[F"{prefix_}mask_scores"]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = -0.1, 1.1
_lowerCAmelCase : Union[str, Any] = torch.sigmoid(_lowerCamelCase )
_lowerCAmelCase : Tuple = s * (r - l) + l
_lowerCAmelCase : Any = s_bar.clamp(min=0.0 , max=1.0 )
_lowerCAmelCase : Any = tensor * mask
print(F"Pruned layer {name}" )
else:
raise ValueError("Unknown pruning method" )
if target_model_path is None:
_lowerCAmelCase : Optional[Any] = os.path.join(
os.path.dirname(_lowerCamelCase ) , F"bertarized_{os.path.basename(_lowerCamelCase )}" )
if not os.path.isdir(_lowerCamelCase ):
shutil.copytree(_lowerCamelCase , _lowerCamelCase )
print(F"\nCreated folder {target_model_path}" )
torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , "pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
_snake_case = parser.parse_args()
main(args)
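# Standalone sketch of the stretched-sigmoid gate in the "l0" branch above
# (scores are illustrative): sigmoid outputs are stretched to (l, r) =
# (-0.1, 1.1) and clamped back to [0, 1], so strongly negative scores give an
# exact 0 in the mask and strongly positive scores an exact 1.
_scores = torch.tensor([-5.0, 0.0, 5.0])
_l, _r = -0.1, 1.1
_gate = (torch.sigmoid(_scores) * (_r - _l) + _l).clamp(min=0.0, max=1.0)
assert _gate[0].item() == 0.0 and _gate[2].item() == 1.0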
| 658 |
import string
def A ( _lowerCamelCase ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
_lowerCAmelCase : str = ""
for symbol in message:
if symbol in string.ascii_uppercase:
_lowerCAmelCase : List[str] = string.ascii_uppercase.find(_lowerCamelCase )
_lowerCAmelCase : Dict = num - key
if num < 0:
_lowerCAmelCase : Dict = num + len(string.ascii_uppercase )
_lowerCAmelCase : Optional[Any] = translated + string.ascii_uppercase[num]
else:
_lowerCAmelCase : int = translated + symbol
print(F"Decryption using Key #{key}: {translated}" )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = input("Encrypted message: " )
_lowerCAmelCase : Dict = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
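# Standalone worked example of the wrap-around above: 'B' (index 1) shifted
# back by key 3 gives 1 - 3 = -2, which wraps to -2 + 26 = 24, i.e. 'Y'.
_num = string.ascii_uppercase.find("B") - 3
if _num < 0:
    _num = _num + len(string.ascii_uppercase)
assert string.ascii_uppercase[_num] == "Y"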
| 658 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'visual_bert'
def __init__( self, __a=3_0522, __a=768, __a=512, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=False, __a=True, __a=1, __a=0, __a=2, **__a, ):
'''simple docstring'''
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
_lowerCAmelCase : Tuple = vocab_size
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : Dict = visual_embedding_dim
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Optional[Any] = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Dict = bypass_transformer
_lowerCAmelCase : Optional[Any] = special_visual_initialize
| 658 |
import requests
from bsa import BeautifulSoup
def A ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
_lowerCAmelCase : str = soup.findAll("h1" )
_lowerCAmelCase : Optional[int] = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 1 |
from typing import List
import numpy as np
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {key: len(_lowerCamelCase ) for key, value in gen_kwargs.items() if isinstance(_lowerCamelCase , _lowerCamelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(F"\t- key {key} has length {length}" for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
_lowerCAmelCase : Dict = max(lists_lengths.values() , default=0 )
return max(1 , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = []
for group_idx in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
_lowerCAmelCase : Optional[int] = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
_lowerCAmelCase : Optional[int] = range(_lowerCamelCase , start + num_shards_to_add )
shards_indices_per_group.append(_lowerCamelCase )
return shards_indices_per_group
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = _number_of_shards_in_gen_kwargs(_lowerCamelCase )
if num_shards == 1:
return [dict(_lowerCamelCase )]
else:
_lowerCAmelCase : Tuple = _distribute_shards(num_shards=_lowerCamelCase , max_num_jobs=_lowerCamelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(_lowerCamelCase , _lowerCamelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(_lowerCamelCase ) )
]
def A ( _lowerCamelCase ):
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , _lowerCamelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = {len(_lowerCamelCase ) for value in gen_kwargs.values() if isinstance(_lowerCamelCase , _lowerCamelCase )}
_lowerCAmelCase : List[Any] = {}
for size in list_sizes:
_lowerCAmelCase : int = list(range(_lowerCamelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
_lowerCAmelCase : List[str] = dict(_lowerCamelCase )
for key, value in shuffled_kwargs.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Dict = [value[i] for i in indices_per_size[len(_lowerCamelCase )]]
return shuffled_kwargs
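# Standalone check of the _distribute_shards arithmetic above: 10 shards over
# 3 jobs become contiguous ranges of sizes 4, 3 and 3 (the remainder goes to
# the first groups).
_demo_groups: list = []
for _group_idx in range(3):
    _to_add = 10 // 3 + (_group_idx < (10 % 3))
    _start = _demo_groups[-1].stop if _demo_groups else 0
    _demo_groups.append(range(_start, _start + _to_add))
assert [len(_g) for _g in _demo_groups] == [4, 3, 3]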
| 658 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
def __init__( self, __a, __a):
'''simple docstring'''
if len(__a) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1.")
_lowerCAmelCase : list[float] = list(__a)
_lowerCAmelCase : Any = degree
def __add__( self, __a):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_lowerCAmelCase : Dict = self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree, __a)
else:
_lowerCAmelCase : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree, __a)
def __sub__( self, __a):
'''simple docstring'''
return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self):
'''simple docstring'''
return Polynomial(self.degree, [-c for c in self.coefficients])
def __mul__( self, __a):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(__a)
return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * self.degree
for i in range(self.degree):
_lowerCAmelCase : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, __a)
def snake_case__ ( self, __a = 0):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + 2)
_lowerCAmelCase : Optional[Any] = constant
for i in range(self.degree + 1):
_lowerCAmelCase : Dict = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, __a)
def __eq__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self, __a):
'''simple docstring'''
return not self.__eq__(__a)
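# Standalone check of the calculus rules above, on plain coefficient lists:
# d/dx (3 + 2x + 5x^2) = 2 + 10x, and integrating back with constant 3
# restores the original coefficients.
_coeffs = [3.0, 2.0, 5.0]
_deriv = [_coeffs[i + 1] * (i + 1) for i in range(len(_coeffs) - 1)]
assert _deriv == [2.0, 10.0]
assert [3.0] + [_deriv[i] / (i + 1) for i in range(len(_deriv))] == _coeffs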
| 658 | 1 |
from __future__ import annotations
from math import pi
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
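# Standalone arithmetic check of the third branch above: with L = 0.035 H and
# f = 1000 Hz, X_L = 2 * pi * f * L = 70 * pi, roughly 219.91 ohms.
from math import isclose
assert isclose(2 * pi * 1000 * 0.035, 219.9115, rel_tol=1e-4)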
| 658 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=3_2000, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=True, __a="bi", __a=0.02, __a=1E-12, __a=0.1, __a=512, __a=None, __a=True, __a=False, __a=False, __a=-1, __a=False, __a="last", __a=True, __a="tanh", __a=0.1, __a=5, __a=5, __a=5, __a=1, __a=2, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Tuple = n_layer
_lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
_lowerCAmelCase : Optional[int] = d_model // n_head
_lowerCAmelCase : List[str] = ff_activation
_lowerCAmelCase : Tuple = d_inner
_lowerCAmelCase : List[Any] = untie_r
_lowerCAmelCase : List[str] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = dropout
_lowerCAmelCase : Optional[int] = mem_len
_lowerCAmelCase : Union[str, Any] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : List[str] = clamp_len
_lowerCAmelCase : Any = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : Optional[Any] = summary_activation
_lowerCAmelCase : Tuple = summary_last_dropout
_lowerCAmelCase : Union[str, Any] = start_n_top
_lowerCAmelCase : Optional[int] = end_n_top
_lowerCAmelCase : Tuple = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def snake_case__ ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( a):
lowerCamelCase__ = (IPNDMScheduler,)
lowerCamelCase__ = (('num_inference_steps', 50),)
def snake_case__ ( self, **__a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {"num_train_timesteps": 1000}
config.update(**__a)
return config
def snake_case__ ( self, __a=0, **__a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs)
_lowerCAmelCase : str = kwargs.pop("num_inference_steps", __a)
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Optional[int] = 0.1 * sample
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config(**__a)
_lowerCAmelCase : List[str] = scheduler_class(**__a)
scheduler.set_timesteps(__a)
# copy over dummy past residuals
_lowerCAmelCase : Any = dummy_past_residuals[:]
if time_step is None:
_lowerCAmelCase : int = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__a)
_lowerCAmelCase : int = scheduler_class.from_pretrained(__a)
new_scheduler.set_timesteps(__a)
# copy over dummy past residuals
_lowerCAmelCase : List[Any] = dummy_past_residuals[:]
_lowerCAmelCase : Any = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : Dict = new_scheduler.step(__a, __a, __a, **__a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
_lowerCAmelCase : List[Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : int = new_scheduler.step(__a, __a, __a, **__a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self, __a=0, **__a):
'''simple docstring'''
_lowerCAmelCase : str = dict(self.forward_default_kwargs)
_lowerCAmelCase : str = kwargs.pop("num_inference_steps", __a)
_lowerCAmelCase : Tuple = self.dummy_sample
_lowerCAmelCase : str = 0.1 * sample
_lowerCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**__a)
scheduler.set_timesteps(__a)
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : Optional[int] = dummy_past_residuals[:]
if time_step is None:
_lowerCAmelCase : int = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__a)
_lowerCAmelCase : List[Any] = scheduler_class.from_pretrained(__a)
# copy over dummy past residuals
new_scheduler.set_timesteps(__a)
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[:]
_lowerCAmelCase : Dict = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : int = new_scheduler.step(__a, __a, __a, **__a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
_lowerCAmelCase : Optional[Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : Tuple = new_scheduler.step(__a, __a, __a, **__a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **__a):
'''simple docstring'''
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : Any = self.get_scheduler_config(**__a)
_lowerCAmelCase : Dict = scheduler_class(**__a)
_lowerCAmelCase : Dict = 10
_lowerCAmelCase : Optional[int] = self.dummy_model()
_lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(__a)
for i, t in enumerate(scheduler.timesteps):
_lowerCAmelCase : List[str] = model(__a, __a)
_lowerCAmelCase : List[Any] = scheduler.step(__a, __a, __a).prev_sample
for i, t in enumerate(scheduler.timesteps):
_lowerCAmelCase : Union[str, Any] = model(__a, __a)
_lowerCAmelCase : Union[str, Any] = scheduler.step(__a, __a, __a).prev_sample
return sample
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs)
_lowerCAmelCase : str = kwargs.pop("num_inference_steps", __a)
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Dict = scheduler_class(**__a)
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(__a, "set_timesteps"):
scheduler.set_timesteps(__a)
elif num_inference_steps is not None and not hasattr(__a, "set_timesteps"):
_lowerCAmelCase : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_lowerCAmelCase : Optional[int] = dummy_past_residuals[:]
_lowerCAmelCase : str = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : Union[str, Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : Union[str, Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
self.assertEqual(output_a.shape, sample.shape)
self.assertEqual(output_a.shape, output_a.shape)
_lowerCAmelCase : Union[str, Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : Optional[Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
self.assertEqual(output_a.shape, sample.shape)
self.assertEqual(output_a.shape, output_a.shape)
def snake_case__ ( self):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__a, time_step=__a)
def snake_case__ ( self):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
self.check_over_forward(num_inference_steps=__a, time_step=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.full_loop()
_lowerCAmelCase : Any = torch.mean(torch.abs(__a))
assert abs(result_mean.item() - 254_0529) < 10
| 658 |
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 1 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
_snake_case = 5_0000
_snake_case = 5000
_snake_case, _snake_case = os.path.split(__file__)
_snake_case = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Dict = dataset[i]
@get_duration
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase ):
_lowerCAmelCase : str = dataset[i : i + batch_size]
@get_duration
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
with dataset.formatted_as(type=_lowerCamelCase ):
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = dataset[i]
@get_duration
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
with dataset.formatted_as(type=_lowerCamelCase ):
for i in range(0 , _lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : List[str] = dataset[i : i + batch_size]
def A ( ):
'''simple docstring'''
_lowerCAmelCase : int = {"num examples": SPEED_TEST_N_EXAMPLES}
_lowerCAmelCase : Optional[Any] = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
_lowerCAmelCase : Optional[Any] = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
_lowerCAmelCase : Dict = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
_lowerCAmelCase : str = generate_example_dataset(
os.path.join(_lowerCamelCase , "dataset.arrow" ) , _lowerCamelCase , num_examples=_lowerCamelCase , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = func(_lowerCamelCase , **_lowerCamelCase )
print("shuffling dataset" )
_lowerCAmelCase : Tuple = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(_lowerCamelCase ) )
_lowerCAmelCase : Union[str, Any] = func(
_lowerCamelCase , **_lowerCamelCase )
with open(_lowerCamelCase , "wb" ) as f:
f.write(json.dumps(_lowerCamelCase ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
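# A minimal sketch of the `get_duration` decorator imported from the local
# `utils` module above -- an assumption, since that module is not shown and the
# real helper may differ. It times the wrapped call and returns the elapsed
# wall-clock seconds, which is what the benchmark functions appear to rely on.
import functools
import time
def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start # elapsed seconds becomes the benchmark result
    return wrapper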
| 658 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
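# The serialization above follows a common nested-config pattern: deep-copy the
# instance __dict__, replace the nested backbone config with its own dict form,
# and record the model type. A self-contained sketch of that pattern (all names
# below are illustrative, not the transformers API):
import copy
class BackboneCfgSketch:
    def __init__(self, depth: int = 50):
        self.depth = depth
    def to_dict(self):
        return dict(self.__dict__)
class HeadCfgSketch:
    model_type = "upernet-sketch"
    def __init__(self, hidden_size: int = 512):
        self.backbone_config = BackboneCfgSketch()
        self.hidden_size = hidden_size
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict() # nested config -> plain dict
        output["model_type"] = self.__class__.model_type
        return output
assert HeadCfgSketch().to_dict()["model_type"] == "upernet-sketch"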
| 658 | 1 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
_lowerCAmelCase : str = 128
elif "12-12" in model_name:
_lowerCAmelCase : str = 12
_lowerCAmelCase : int = 12
elif "14-14" in model_name:
_lowerCAmelCase : str = 14
_lowerCAmelCase : Optional[Any] = 14
elif "16-16" in model_name:
_lowerCAmelCase : Dict = 16
_lowerCAmelCase : int = 16
else:
raise ValueError("Model not supported" )
_lowerCAmelCase : Dict = "huggingface/label-files"
if "speech-commands" in model_name:
_lowerCAmelCase : str = 35
_lowerCAmelCase : str = "speech-commands-v2-id2label.json"
else:
_lowerCAmelCase : Dict = 527
_lowerCAmelCase : List[Any] = "audioset-id2label.json"
_lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase : Tuple = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : Union[str, Any] = idalabel
_lowerCAmelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def A ( _lowerCamelCase ):
'''simple docstring'''
if "module.v" in name:
_lowerCAmelCase : Dict = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
_lowerCAmelCase : Optional[Any] = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
_lowerCAmelCase : Any = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
_lowerCAmelCase : Dict = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
_lowerCAmelCase : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
_lowerCAmelCase : str = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
_lowerCAmelCase : Optional[int] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_lowerCAmelCase : Dict = name.replace("attn" , "attention.self" )
if "norm1" in name:
_lowerCAmelCase : List[Any] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_lowerCAmelCase : int = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_lowerCAmelCase : Optional[Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_lowerCAmelCase : List[str] = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
_lowerCAmelCase : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
_lowerCAmelCase : Optional[Any] = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
_lowerCAmelCase : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCAmelCase : Tuple = orig_state_dict.pop(_lowerCamelCase )
if "qkv" in key:
_lowerCAmelCase : Optional[Any] = key.split("." )
_lowerCAmelCase : List[Any] = int(key_split[3] )
_lowerCAmelCase : Any = config.hidden_size
if "weight" in key:
_lowerCAmelCase : Any = val[:dim, :]
_lowerCAmelCase : Tuple = val[dim : dim * 2, :]
_lowerCAmelCase : Tuple = val[-dim:, :]
else:
_lowerCAmelCase : Optional[Any] = val[:dim]
_lowerCAmelCase : Dict = val[dim : dim * 2]
_lowerCAmelCase : str = val[-dim:]
else:
_lowerCAmelCase : int = val
return orig_state_dict
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = get_audio_spectrogram_transformer_config(_lowerCamelCase )
_lowerCAmelCase : List[str] = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
_lowerCAmelCase : Optional[Any] = model_name_to_url[model_name]
_lowerCAmelCase : Optional[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )
# remove some keys
remove_keys(_lowerCamelCase )
# rename some keys
_lowerCAmelCase : Tuple = convert_state_dict(_lowerCamelCase , _lowerCamelCase )
# load 🤗 model
_lowerCAmelCase : str = ASTForAudioClassification(_lowerCamelCase )
model.eval()
model.load_state_dict(_lowerCamelCase )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
_lowerCAmelCase : Any = -4.2_67_73_93 if "speech-commands" not in model_name else -6.84_59_78
_lowerCAmelCase : str = 4.5_68_99_74 if "speech-commands" not in model_name else 5.5_65_45_26
_lowerCAmelCase : List[Any] = 1_024 if "speech-commands" not in model_name else 128
_lowerCAmelCase : int = ASTFeatureExtractor(mean=_lowerCamelCase , std=_lowerCamelCase , max_length=_lowerCamelCase )
if "speech-commands" in model_name:
_lowerCAmelCase : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" )
_lowerCAmelCase : Any = dataset[0]["audio"]["array"]
else:
_lowerCAmelCase : Dict = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = torchaudio.load(_lowerCamelCase )
_lowerCAmelCase : List[str] = waveform.squeeze().numpy()
_lowerCAmelCase : Union[str, Any] = feature_extractor(_lowerCamelCase , sampling_rate=16_000 , return_tensors="pt" )
# forward pass
_lowerCAmelCase : Any = model(**_lowerCamelCase )
_lowerCAmelCase : List[Any] = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
_lowerCAmelCase : Tuple = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
_lowerCAmelCase : Union[str, Any] = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
_lowerCAmelCase : Any = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
_lowerCAmelCase : str = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
_lowerCAmelCase : List[str] = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
_lowerCAmelCase : Tuple = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
_lowerCAmelCase : int = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
_lowerCAmelCase : int = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1e-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving feature extractor to {pytorch_dump_folder_path}" )
feature_extractor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(F"MIT/{model_name}" )
feature_extractor.push_to_hub(F"MIT/{model_name}" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_snake_case = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 658 |
import baseaa
def A ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaaencode(string.encode("utf-8" ) )
def A ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaadecode(_lowerCamelCase ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
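# The `baseaa.aaaencode` / `aaadecode` calls above appear to correspond to the
# standard library's Ascii85 helpers; assuming that reading, a runnable
# equivalent uses `base64.a85encode` / `base64.a85decode` directly:
import base64
def ascii85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode("utf-8"))
def ascii85_decode(a85: bytes) -> str:
    return base64.a85decode(a85).decode("utf-8")
assert ascii85_decode(ascii85_encode("Hello World!")) == "Hello World!"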
| 658 | 1 |
from __future__ import annotations
import typing
from collections import Counter
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(_lowerCamelCase , max_perimeter + 1 ):
_lowerCAmelCase : Union[str, Any] = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def A ( _lowerCamelCase = 1_000 ):
'''simple docstring'''
_lowerCAmelCase : Tuple = pythagorean_triple(_lowerCamelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
| 658 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
| 658 | 1 |
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Optional[Any] = len(_lowerCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , _lowerCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def A ( _lowerCamelCase ):
'''simple docstring'''
if len(_lowerCamelCase ) <= 1:
return arr, 0
_lowerCAmelCase : List[str] = len(_lowerCamelCase ) // 2
_lowerCAmelCase : int = arr[0:mid]
_lowerCAmelCase : Dict = arr[mid:]
_lowerCAmelCase , _lowerCAmelCase : Dict = count_inversions_recursive(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : str = count_inversions_recursive(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Any = _count_cross_inversions(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : List[str] = 0
while i < len(_lowerCamelCase ) and j < len(_lowerCamelCase ):
if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p),
            # since p is sorted; every remaining element of p therefore
            # forms an inversion with q[j].
num_inversion += len(_lowerCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(_lowerCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_lowerCAmelCase : List[Any] = count_inversions_bf(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : List[str] = count_inversions_recursive(_lowerCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , _lowerCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_lowerCAmelCase : Any = count_inversions_bf(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = count_inversions_recursive(_lowerCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , _lowerCamelCase )
# an empty list should also have zero inversions
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Tuple = count_inversions_bf(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = count_inversions_recursive(_lowerCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , _lowerCamelCase )
if __name__ == "__main__":
main()
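# A self-contained sketch of the same divide-and-conquer inversion count with
# explicit names, making the merge step visible (O(n log n), versus the O(n^2)
# brute force above); it reproduces the 8 inversions noted in main():
def count_inversions_sketch(arr):
    if len(arr) <= 1:
        return list(arr), 0
    mid = len(arr) // 2
    left, inv_left = count_inversions_sketch(arr[:mid])
    right, inv_right = count_inversions_sketch(arr[mid:])
    merged, inv_cross = [], 0
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] > right[j]:
            # every element still left in `left` inverts with right[j]
            inv_cross += len(left) - i
            merged.append(right[j])
            j += 1
        else:
            merged.append(left[i])
            i += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged, inv_left + inv_right + inv_cross
assert count_inversions_sketch([10, 2, 1, 5, 5, 2, 11])[1] == 8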
| 658 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = XLNetConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : Any = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : Any = finetuning_task
_lowerCAmelCase : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Union[str, Any] = XLNetForSequenceClassification(_lowerCamelCase )
elif "squad" in finetuning_task:
_lowerCAmelCase : Union[str, Any] = finetuning_task
_lowerCAmelCase : Any = XLNetForQuestionAnswering(_lowerCamelCase )
else:
_lowerCAmelCase : Union[str, Any] = XLNetLMHeadModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(F"Save PyTorch model to {os.path.abspath(_lowerCamelCase )}" )
torch.save(model.state_dict() , _lowerCamelCase )
print(F"Save configuration file to {os.path.abspath(_lowerCamelCase )}" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 658 | 1 |
from __future__ import annotations
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = len(_lowerCamelCase )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(_lowerCamelCase ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _lowerCamelCase , _lowerCamelCase , )
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : list[list[str]] = []
depth_first_search([] , [] , [] , _lowerCamelCase , _lowerCamelCase )
# Print all the boards
for board in boards:
for column in board:
print(_lowerCamelCase )
print("" )
print(len(_lowerCamelCase ) , "solutions were found." )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
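# A self-contained sketch of the same backtracking scheme with explicit names,
# using the row - col / row + col diagonal identities explained in the comments
# above; the standard 8x8 board is known to have 92 solutions.
def solve_n_queens_sketch(n: int) -> int:
    count = 0
    def place(row: int, cols: set, diag_r: set, diag_l: set) -> None:
        nonlocal count
        if row == n:
            count += 1
            return
        for col in range(n):
            # row - col identifies a 45-degree diagonal, row + col a 135-degree one
            if col in cols or row - col in diag_r or row + col in diag_l:
                continue
            place(row + 1, cols | {col}, diag_r | {row - col}, diag_l | {row + col})
    place(0, set(), set(), set())
    return count
assert solve_n_queens_sketch(8) == 92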
| 658 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 658 | 1 |
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Tuple = counter
if counter > pre_counter:
_lowerCAmelCase : Union[str, Any] = inputa
_lowerCAmelCase : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
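# An alternative sketch of the same memoized Collatz search, using recursion
# plus functools.lru_cache in place of the hand-rolled counters dict; the
# expected answer below 10_000 (6171, with a chain of 262 terms) is well known.
from functools import lru_cache
@lru_cache(maxsize=None)
def collatz_chain_length(n: int) -> int:
    if n == 1:
        return 1
    return 1 + collatz_chain_length(n // 2 if n % 2 == 0 else 3 * n + 1)
assert max(range(1, 10_000), key=collatz_chain_length) == 6171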
| 658 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = OmegaConf.load(_lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) )
return config
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if conf_path is None:
_lowerCAmelCase : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml"
_lowerCAmelCase : Tuple = load_config(_lowerCamelCase , display=_lowerCamelCase )
_lowerCAmelCase : str = VQModel(**config.model.params )
if ckpt_path is None:
_lowerCAmelCase : Optional[int] = "./model_checkpoints/vqgan_only.pt"
_lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
if ".ckpt" in ckpt_path:
_lowerCAmelCase : List[Any] = sd["state_dict"]
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
model.to(_lowerCamelCase )
del sd
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = model.encode(_lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
_lowerCAmelCase : int = model.decode(_lowerCamelCase )
return xrec
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = string.rsplit("." , 1 )
if reload:
_lowerCAmelCase : Dict = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def A ( _lowerCamelCase ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : str = instantiate_from_config(_lowerCamelCase )
if sd is not None:
model.load_state_dict(_lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if ckpt:
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : int = pl_sd["global_step"]
print(F"loaded model from global step {global_step}." )
else:
_lowerCAmelCase : Optional[int] = {"state_dict": None}
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"]
return model, global_step
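# A self-contained sketch of the dotted-path loader pattern used above: split
# "package.module.Name" on the last dot, import the module, fetch the attribute.
import importlib
def get_obj_from_str_sketch(path: str):
    module_name, cls_name = path.rsplit(".", 1)
    return getattr(importlib.import_module(module_name), cls_name)
assert get_obj_from_str_sketch("collections.OrderedDict") is __import__("collections").OrderedDict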
| 658 | 1 |
from collections import deque
from math import floor
from random import random
from time import time
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = {}
def snake_case__ ( self, __a, __a, __a=1):
'''simple docstring'''
if self.graph.get(__a):
if self.graph[u].count([w, v]) == 0:
self.graph[u].append([w, v])
else:
_lowerCAmelCase : Optional[int] = [[w, v]]
if not self.graph.get(__a):
_lowerCAmelCase : Any = []
def snake_case__ ( self):
'''simple docstring'''
return list(self.graph)
def snake_case__ ( self, __a, __a):
'''simple docstring'''
if self.graph.get(__a):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__a)
def snake_case__ ( self, __a=-2, __a=-1):
'''simple docstring'''
if s == d:
return []
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[str] = []
if s == -2:
_lowerCAmelCase : Tuple = list(self.graph)[0]
stack.append(__a)
visited.append(__a)
_lowerCAmelCase : str = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
_lowerCAmelCase : str = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
if node[1] == d:
visited.append(__a)
return visited
else:
stack.append(node[1])
visited.append(node[1])
_lowerCAmelCase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__a) != 0:
_lowerCAmelCase : Optional[int] = stack[len(__a) - 1]
else:
_lowerCAmelCase : Optional[Any] = ss
# check if se have reached the starting point
if len(__a) == 0:
return visited
def snake_case__ ( self, __a=-1):
'''simple docstring'''
if c == -1:
_lowerCAmelCase : Any = floor(random() * 1_0000) + 10
for i in range(__a):
# every vertex has max 100 edges
for _ in range(floor(random() * 102) + 1):
_lowerCAmelCase : Tuple = floor(random() * c) + 1
if n != i:
self.add_pair(__a, __a, 1)
def snake_case__ ( self, __a=-2):
'''simple docstring'''
_lowerCAmelCase : Any = deque()
_lowerCAmelCase : int = []
if s == -2:
_lowerCAmelCase : Optional[Any] = list(self.graph)[0]
d.append(__a)
visited.append(__a)
while d:
_lowerCAmelCase : List[Any] = d.popleft()
if len(self.graph[s]) != 0:
for node in self.graph[s]:
if visited.count(node[1]) < 1:
d.append(node[1])
visited.append(node[1])
return visited
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def snake_case__ ( self, __a):
'''simple docstring'''
return len(self.graph[u])
def snake_case__ ( self, __a=-2):
'''simple docstring'''
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Dict = []
if s == -2:
_lowerCAmelCase : Tuple = list(self.graph)[0]
stack.append(__a)
visited.append(__a)
_lowerCAmelCase : List[str] = s
_lowerCAmelCase : str = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
_lowerCAmelCase : Optional[Any] = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
_lowerCAmelCase : List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop())
if len(__a) != 0:
_lowerCAmelCase : str = stack[len(__a) - 1]
else:
_lowerCAmelCase : Optional[int] = ss
# check if se have reached the starting point
if len(__a) == 0:
return sorted_nodes
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = list(self.graph)[0]
stack.append(__a)
visited.append(__a)
_lowerCAmelCase : Union[str, Any] = -2
_lowerCAmelCase : str = []
_lowerCAmelCase : List[str] = s
_lowerCAmelCase : int = False
_lowerCAmelCase : Any = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
_lowerCAmelCase : int = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
_lowerCAmelCase : Tuple = len(__a) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
_lowerCAmelCase : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCAmelCase : List[str] = True
if len(__a) != 0:
_lowerCAmelCase : Tuple = stack[len(__a) - 1]
else:
_lowerCAmelCase : Optional[int] = False
indirect_parents.append(__a)
_lowerCAmelCase : str = s
_lowerCAmelCase : str = ss
# check if se have reached the starting point
if len(__a) == 0:
return list(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : int = []
_lowerCAmelCase : str = list(self.graph)[0]
stack.append(__a)
visited.append(__a)
_lowerCAmelCase : Tuple = -2
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : str = s
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
_lowerCAmelCase : List[str] = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
_lowerCAmelCase : Tuple = len(__a) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
_lowerCAmelCase : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCAmelCase : Union[str, Any] = True
if len(__a) != 0:
_lowerCAmelCase : Any = stack[len(__a) - 1]
else:
_lowerCAmelCase : Tuple = False
indirect_parents.append(__a)
_lowerCAmelCase : Any = s
_lowerCAmelCase : List[Any] = ss
# check if se have reached the starting point
if len(__a) == 0:
return False
def snake_case__ ( self, __a=-2, __a=-1):
'''simple docstring'''
_lowerCAmelCase : Tuple = time()
self.dfs(__a, __a)
_lowerCAmelCase : List[Any] = time()
return end - begin
def snake_case__ ( self, __a=-2):
'''simple docstring'''
_lowerCAmelCase : int = time()
self.bfs(__a)
_lowerCAmelCase : str = time()
return end - begin
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = {}
def snake_case__ ( self, __a, __a, __a=1):
'''simple docstring'''
if self.graph.get(__a):
# if there already is a edge
if self.graph[u].count([w, v]) == 0:
self.graph[u].append([w, v])
else:
# if u does not exist
_lowerCAmelCase : int = [[w, v]]
# add the other way
if self.graph.get(__a):
# if there already is a edge
if self.graph[v].count([w, u]) == 0:
self.graph[v].append([w, u])
else:
# if u does not exist
_lowerCAmelCase : Any = [[w, u]]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
if self.graph.get(__a):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(__a)
# the other way round
if self.graph.get(__a):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(__a)
def snake_case__ ( self, __a=-2, __a=-1):
'''simple docstring'''
if s == d:
return []
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = []
if s == -2:
_lowerCAmelCase : Optional[int] = list(self.graph)[0]
stack.append(__a)
visited.append(__a)
_lowerCAmelCase : Any = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
_lowerCAmelCase : List[str] = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
if node[1] == d:
visited.append(__a)
return visited
else:
stack.append(node[1])
visited.append(node[1])
_lowerCAmelCase : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__a) != 0:
_lowerCAmelCase : Optional[int] = stack[len(__a) - 1]
else:
_lowerCAmelCase : str = ss
# check if se have reached the starting point
if len(__a) == 0:
return visited
def snake_case__ ( self, __a=-1):
'''simple docstring'''
if c == -1:
_lowerCAmelCase : Tuple = floor(random() * 1_0000) + 10
for i in range(__a):
# every vertex has max 100 edges
for _ in range(floor(random() * 102) + 1):
_lowerCAmelCase : List[str] = floor(random() * c) + 1
if n != i:
self.add_pair(__a, __a, 1)
def snake_case__ ( self, __a=-2):
'''simple docstring'''
_lowerCAmelCase : List[Any] = deque()
_lowerCAmelCase : int = []
if s == -2:
_lowerCAmelCase : List[str] = list(self.graph)[0]
d.append(__a)
visited.append(__a)
while d:
_lowerCAmelCase : Dict = d.popleft()
if len(self.graph[s]) != 0:
for node in self.graph[s]:
if visited.count(node[1]) < 1:
d.append(node[1])
visited.append(node[1])
return visited
def snake_case__ ( self, __a):
'''simple docstring'''
return len(self.graph[u])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Any = []
_lowerCAmelCase : Dict = list(self.graph)[0]
stack.append(__a)
visited.append(__a)
_lowerCAmelCase : Tuple = -2
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : int = s
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
_lowerCAmelCase : Any = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
_lowerCAmelCase : Any = len(__a) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
_lowerCAmelCase : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCAmelCase : Union[str, Any] = True
if len(__a) != 0:
_lowerCAmelCase : Optional[Any] = stack[len(__a) - 1]
else:
_lowerCAmelCase : Optional[Any] = False
indirect_parents.append(__a)
_lowerCAmelCase : Dict = s
_lowerCAmelCase : Tuple = ss
# check if se have reached the starting point
if len(__a) == 0:
return list(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Tuple = list(self.graph)[0]
stack.append(__a)
visited.append(__a)
_lowerCAmelCase : Union[str, Any] = -2
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : List[str] = s
_lowerCAmelCase : str = False
_lowerCAmelCase : Union[str, Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
_lowerCAmelCase : Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
_lowerCAmelCase : Tuple = len(__a) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
_lowerCAmelCase : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowerCAmelCase : List[str] = True
if len(__a) != 0:
_lowerCAmelCase : Union[str, Any] = stack[len(__a) - 1]
else:
_lowerCAmelCase : Optional[Any] = False
indirect_parents.append(__a)
_lowerCAmelCase : str = s
_lowerCAmelCase : Dict = ss
# check if se have reached the starting point
if len(__a) == 0:
return False
def snake_case__ ( self):
'''simple docstring'''
return list(self.graph)
def snake_case__ ( self, __a=-2, __a=-1):
'''simple docstring'''
_lowerCAmelCase : List[Any] = time()
self.dfs(__a, __a)
_lowerCAmelCase : Union[str, Any] = time()
return end - begin
def snake_case__ ( self, __a=-2):
'''simple docstring'''
_lowerCAmelCase : int = time()
self.bfs(__a)
_lowerCAmelCase : Any = time()
return end - begin
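# A minimal self-contained sketch of the add_pair / dfs behaviour the classes
# above implement over an adjacency list (directed edges, iterative DFS with an
# explicit stack and an optional goal node):
from collections import defaultdict
def dfs_sketch(graph, start, goal=None):
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.append(node)
        if node == goal:
            break
        stack.extend(reversed(graph.get(node, []))) # keep left-to-right visit order
    return visited
_g = defaultdict(list)
for _u, _v in [(0, 1), (0, 2), (1, 3), (2, 3)]:
    _g[_u].append(_v) # directed edge u -> v, mirroring add_pair without weights
assert dfs_sketch(_g, 0, goal=3) == [0, 1, 3]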
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
| 658 | 1 |
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
for i in range(1 , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = collection[i]
_lowerCAmelCase : str = 0
_lowerCAmelCase : Union[str, Any] = i - 1
while low <= high:
_lowerCAmelCase : List[str] = (low + high) // 2
if val < collection[mid]:
_lowerCAmelCase : Optional[int] = mid - 1
else:
_lowerCAmelCase : List[str] = mid + 1
for j in range(_lowerCamelCase , _lowerCamelCase , -1 ):
_lowerCAmelCase : int = collection[j - 1]
_lowerCAmelCase : Optional[int] = val
return collection
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
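# A self-contained sketch of the same binary insertion sort with explicit
# low/high/val names, plus a quick correctness check:
def binary_insertion_sort_sketch(collection: list) -> list:
    for i in range(1, len(collection)):
        val = collection[i]
        low, high = 0, i - 1
        while low <= high: # binary search for the insertion point
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1): # shift right to open the slot
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
assert binary_insertion_sort_sketch([5, 2, 9, 1, 5]) == [1, 2, 5, 5, 9]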
| 658 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_snake_case = numpy.array([0, 0])
_snake_case = numpy.array([0.5, 0.8660254])
_snake_case = numpy.array([1, 0])
_snake_case = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = initial_vectors
for _ in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = iteration_step(_lowerCamelCase )
return vectors
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCAmelCase : Optional[Any] = vectors[i + 1]
new_vectors.append(_lowerCamelCase )
_lowerCAmelCase : Tuple = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = numpy.radians(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = numpy.cos(_lowerCamelCase ), numpy.sin(_lowerCamelCase )
_lowerCAmelCase : str = numpy.array(((c, -s), (s, c)) )
return numpy.dot(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
_lowerCAmelCase , _lowerCAmelCase : Dict = zip(*_lowerCamelCase )
plt.plot(_lowerCamelCase , _lowerCamelCase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 658 |
def A ( _lowerCamelCase ):
'''simple docstring'''
if length <= 0 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
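# The closed form used above is h_n = n * (2n - 1); the generator starts at
# n = 0, so the sequence opens with 0. Quick check of the first terms:
assert [n * (2 * n - 1) for n in range(6)] == [0, 1, 6, 15, 28, 45]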
| 658 | 1 |
import string
def A ( _lowerCamelCase ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
_lowerCAmelCase : str = ""
for symbol in message:
if symbol in string.ascii_uppercase:
_lowerCAmelCase : List[str] = string.ascii_uppercase.find(_lowerCamelCase )
_lowerCAmelCase : Dict = num - key
if num < 0:
_lowerCAmelCase : Dict = num + len(string.ascii_uppercase )
_lowerCAmelCase : Optional[Any] = translated + string.ascii_uppercase[num]
else:
_lowerCAmelCase : int = translated + symbol
print(F"Decryption using Key #{key}: {translated}" )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = input("Encrypted message: " )
_lowerCAmelCase : Dict = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
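# A companion sketch for the brute-force decryption above: the encryption
# direction of the same shift cipher, so the two can be exercised end to end
# (the helper name is ours):
import string
def caesar_encrypt_sketch(message: str, key: int) -> str:
    out = []
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) + key) % 26
            out.append(string.ascii_uppercase[num])
        else:
            out.append(symbol)
    return "".join(out)
assert caesar_encrypt_sketch("Hello", 3) == "KHOOR"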
| 658 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( _lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
# Centralize the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    '''Project the dataset onto its first `dimensions` principal components.'''
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first
        # `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    '''Project the dataset onto its `dimensions` most discriminant directions.'''
    # There must be more classes than requested output dimensions
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    '''Check that LDA rejects a request for as many dimensions as classes.'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions >= classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''Exercise PCA on a tiny dataset and compare against the expected projection.'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
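# Minimal usage sketch (assumed layout: one column of `features` per sample,
# matching the `features[:, labels == i]` indexing above):
# features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
# projected = principal_component_analysis(features, dimensions=2)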
| 658 | 1 |
def topological_sort(graph):
    '''Kahn's algorithm: topologically sort a DAG given as an adjacency list.'''
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
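# For the acyclic adjacency list above, Kahn's algorithm prints one valid
# order; with the FIFO tie-breaking implemented here that is [0, 1, 2, 3, 4, 5].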
| 658 |
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    '''Return the citation count scraped from a Google Scholar lookup page.'''
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 1 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_snake_case = logging.get_logger(__name__)
def normalize_box(box, width, height):
    '''Scale a pixel bounding box to the 0-1000 coordinate range used by LayoutLM.'''
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    '''Apply Tesseract OCR on a document image and return recognized words plus normalized bounding boxes.'''
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
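# Quick sanity check of the 0-1000 convention (illustrative numbers only):
# a box [10, 20, 110, 220] in a 1000x2000-pixel image maps to
# normalize_box([10, 20, 110, 220], width=1_000, height=2_000) -> [10, 10, 110, 110]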
class UpperCAmelCase_ ( a):
lowerCamelCase__ = ['pixel_values']
def __init__( self, __a = True, __a = None, __a = PILImageResampling.BILINEAR, __a = True, __a = None, __a = "", **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : int = size if size is not None else {"height": 224, "width": 224}
_lowerCAmelCase : int = get_size_dict(__a)
_lowerCAmelCase : Dict = do_resize
_lowerCAmelCase : str = size
_lowerCAmelCase : Optional[int] = resample
_lowerCAmelCase : Dict = apply_ocr
_lowerCAmelCase : Union[str, Any] = ocr_lang
_lowerCAmelCase : Any = tesseract_config
def snake_case__ ( self, __a, __a, __a = PILImageResampling.BILINEAR, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Dict = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
_lowerCAmelCase : Any = (size["height"], size["width"])
return resize(__a, size=__a, resample=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = ChannelDimension.FIRST, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Tuple = size if size is not None else self.size
_lowerCAmelCase : Tuple = get_size_dict(__a)
_lowerCAmelCase : int = resample if resample is not None else self.resample
_lowerCAmelCase : List[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
_lowerCAmelCase : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang
_lowerCAmelCase : List[Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
_lowerCAmelCase : Any = make_list_of_images(__a)
if not valid_images(__a):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
# All transformations expect numpy arrays.
_lowerCAmelCase : Dict = [to_numpy_array(__a) for image in images]
if apply_ocr:
requires_backends(self, "pytesseract")
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = []
for image in images:
_lowerCAmelCase , _lowerCAmelCase : Tuple = apply_tesseract(__a, __a, __a)
words_batch.append(__a)
boxes_batch.append(__a)
if do_resize:
_lowerCAmelCase : Any = [self.resize(image=__a, size=__a, resample=__a) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_lowerCAmelCase : Dict = [flip_channel_order(__a) for image in images]
_lowerCAmelCase : List[Any] = [to_channel_dimension_format(__a, __a) for image in images]
_lowerCAmelCase : Optional[Any] = BatchFeature(data={"pixel_values": images}, tensor_type=__a)
if apply_ocr:
_lowerCAmelCase : str = words_batch
_lowerCAmelCase : List[Any] = boxes_batch
return data
| 658 |
def solution(limit: int = 1_000_000) -> int:
    '''Project Euler 14: starting number below `limit` with the longest Collatz chain.'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2, limit):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
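# With the default limit of one million, the memoized search returns 837799,
# the widely cited answer to Project Euler problem 14.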
| 658 | 1 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    '''Return the gross price: the net price plus the tax amount.'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    '''Map an OpenAI Jukebox state-dict key onto the Transformers naming scheme.'''
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")
    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")
    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")
    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
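# Illustrative mapping (hypothetical key, not taken from a real checkpoint):
# replace_key("vqvae.bottleneck.level_blocks.0.k") -> "vqvae.bottleneck.level_blocks.0.codebook"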
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    '''Rename every key of `state_dict` to match `model_state_dict`, recording the mapping.'''
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key
        key = replace_key(key)
        if F"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(F"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            val = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    '''Download the OpenAI Jukebox checkpoints and convert them into a Transformers JukeboxModel.'''
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(F"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(F"{pytorch_dump_folder_path}/", exist_ok=True)
            open(F"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = "vqvae" if i == 0 else F"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)
    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(F"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)
    print(F"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 1 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    '''Format `t` (in seconds) as (h):mm:ss.'''
    t = int(t)
    h, m, s = t // 3_600, (t // 60) % 60, t % 60
    return F"{h}:{m:02d}:{s:02d}" if h != 0 else F"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    '''Render an HTML <progress> element for notebook display.'''
    return F"\n    <div>\n      {prefix}\n      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n      {label}\n    </div>\n    "
def text_to_html_table(items):
    '''Put the texts in `items` in an HTML table.'''
    html_code = "<table border=\"1\" class=\"dataframe\">\n"
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += F"    <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = F"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += F"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class UpperCAmelCase_ :
lowerCamelCase__ = 5
lowerCamelCase__ = 0.2
def __init__( self, __a, __a = None, __a = True, __a = None, __a = 300, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = total
_lowerCAmelCase : Any = "" if prefix is None else prefix
_lowerCAmelCase : str = leave
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Optional[int] = width
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Tuple = None
def snake_case__ ( self, __a, __a = False, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[Any] = value
if comment is not None:
_lowerCAmelCase : Union[str, Any] = comment
if self.last_value is None:
_lowerCAmelCase : Optional[int] = time.time()
_lowerCAmelCase : Any = value
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Dict = self.warmup
_lowerCAmelCase : Union[str, Any] = 1
self.update_bar(__a)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
if self.first_calls > 0:
self.first_calls -= 1
_lowerCAmelCase : Dict = time.time()
_lowerCAmelCase : str = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_lowerCAmelCase : List[str] = self.elapsed_time / (value - self.start_value)
else:
_lowerCAmelCase : Optional[Any] = None
if value >= self.total:
_lowerCAmelCase : Optional[int] = self.total
_lowerCAmelCase : int = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_lowerCAmelCase : Tuple = self.average_time_per_item * (self.total - value)
self.update_bar(__a)
_lowerCAmelCase : str = value
_lowerCAmelCase : List[Any] = current_time
if self.average_time_per_item is None:
_lowerCAmelCase : List[str] = 1
else:
_lowerCAmelCase : Tuple = max(int(self.update_every / self.average_time_per_item), 1)
def snake_case__ ( self, __a, __a=None):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = " " * (len(str(self.total)) - len(str(__a))) + str(__a)
if self.elapsed_time is None:
_lowerCAmelCase : Optional[int] = f"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
_lowerCAmelCase : Optional[int] = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
else:
_lowerCAmelCase : List[str] = (
f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
f" {format_time(self.predicted_remaining)}"
)
self.label += f", {1/self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
self.display()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_lowerCAmelCase : List[str] = disp.display(disp.HTML(self.html_code), display_id=__a)
else:
self.output.update(disp.HTML(self.html_code))
def snake_case__ ( self):
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(""))
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=None):
'''simple docstring'''
super().__init__(__a)
_lowerCAmelCase : str = None if column_names is None else [column_names]
_lowerCAmelCase : List[Any] = None
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_lowerCAmelCase : Union[str, Any] = disp.display(disp.HTML(self.html_code), display_id=__a)
else:
self.output.update(disp.HTML(self.html_code))
def snake_case__ ( self, __a):
'''simple docstring'''
if self.inner_table is None:
_lowerCAmelCase : str = [list(values.keys()), list(values.values())]
else:
_lowerCAmelCase : List[Any] = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__a)
_lowerCAmelCase : Optional[Any] = columns
self.inner_table.append([values[c] for c in columns])
def snake_case__ ( self, __a, __a=None, __a=300):
'''simple docstring'''
_lowerCAmelCase : Any = NotebookProgressBar(__a, prefix=__a, parent=self, width=__a)
return self.child_bar
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = None
self.display()
class UpperCAmelCase_ ( a):
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : List[str] = False
def snake_case__ ( self, __a, __a, __a, **__a):
'''simple docstring'''
_lowerCAmelCase : Tuple = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Union[str, Any] = [self.first_column] + ["Training Loss"]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss")
_lowerCAmelCase : List[Any] = NotebookTrainingTracker(state.max_steps, __a)
def snake_case__ ( self, __a, __a, __a, **__a):
'''simple docstring'''
_lowerCAmelCase : str = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1, comment=f"Epoch {epoch}/{state.num_train_epochs}", force_update=self._force_next_update, )
_lowerCAmelCase : Tuple = False
def snake_case__ ( self, __a, __a, __a, __a=None, **__a):
'''simple docstring'''
if not has_length(__a):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_lowerCAmelCase : Any = self.training_tracker.add_child(len(__a))
else:
_lowerCAmelCase : Tuple = NotebookProgressBar(len(__a))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def snake_case__ ( self, __a, __a, __a, **__a):
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
_lowerCAmelCase : List[Any] = None
def snake_case__ ( self, __a, __a, __a, __a=None, **__a):
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_lowerCAmelCase : Any = {"Training Loss": logs["loss"]}
            # First column is necessarily "Step" since we're not in epoch eval strategy
_lowerCAmelCase : List[str] = state.global_step
self.training_tracker.write_line(__a)
def snake_case__ ( self, __a, __a, __a, __a=None, **__a):
'''simple docstring'''
if self.training_tracker is not None:
_lowerCAmelCase : Tuple = {"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history):
if "loss" in log:
_lowerCAmelCase : Tuple = log["loss"]
break
if self.first_column == "Epoch":
_lowerCAmelCase : int = int(state.epoch)
else:
_lowerCAmelCase : Optional[Any] = state.global_step
_lowerCAmelCase : Union[str, Any] = "eval"
for k in metrics:
if k.endswith("_loss"):
_lowerCAmelCase : Any = re.sub(R"\_loss$", "", __a)
_lowerCAmelCase : Optional[Any] = metrics.pop("total_flos", __a)
_lowerCAmelCase : str = metrics.pop("epoch", __a)
_lowerCAmelCase : Any = metrics.pop(f"{metric_key_prefix}_runtime", __a)
_lowerCAmelCase : Union[str, Any] = metrics.pop(f"{metric_key_prefix}_samples_per_second", __a)
_lowerCAmelCase : List[str] = metrics.pop(f"{metric_key_prefix}_steps_per_second", __a)
_lowerCAmelCase : List[str] = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", __a)
for k, v in metrics.items():
if k == f"{metric_key_prefix}_loss":
_lowerCAmelCase : List[Any] = v
else:
_lowerCAmelCase : str = k.split("_")
_lowerCAmelCase : int = " ".join([part.capitalize() for part in splits[1:]])
_lowerCAmelCase : List[str] = v
self.training_tracker.write_line(__a)
self.training_tracker.remove_child()
_lowerCAmelCase : List[Any] = None
# Evaluation takes a long time so we should force the next update.
_lowerCAmelCase : int = True
def snake_case__ ( self, __a, __a, __a, **__a):
'''simple docstring'''
self.training_tracker.update(
state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=__a)
_lowerCAmelCase : Dict = None
| 658 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    '''Build a ksize x ksize Gabor kernel (an even ksize is bumped to the next odd value).'''
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
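# Parameter sketch (standard Gabor conventions, as used in the formula above):
# sigma sets the Gaussian envelope width, theta the orientation in degrees,
# lambd the sinusoid wavelength, gamma the spatial aspect ratio on the _y axis,
# and psi the phase offset of the cosine carrier.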
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 11x11 mask and 6 directions", out)
waitKey(0)
| 658 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_snake_case = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 658 |
def binary_insertion_sort(collection):
    '''Sort a mutable sequence in place, locating each insertion point by binary search.'''
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
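# Example: binary_insertion_sort([5, 2, 4, 1]) -> [1, 2, 4, 5]. The binary search
# finds each insertion point in O(log i) comparisons, but the element shift keeps
# the overall worst case at O(n^2).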
| 658 | 1 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=False, __a=True, __a="None", __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : int = use_input_mask
_lowerCAmelCase : Union[str, Any] = use_token_type_ids
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : str = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : str = num_labels
_lowerCAmelCase : List[str] = num_choices
_lowerCAmelCase : Optional[int] = relative_attention
_lowerCAmelCase : int = position_biased_input
_lowerCAmelCase : Dict = pos_att_type
_lowerCAmelCase : Optional[Any] = scope
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : List[str] = None
if self.use_input_mask:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
_lowerCAmelCase : Any = None
if self.use_token_type_ids:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : int = None
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self):
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_config()
_lowerCAmelCase : int = 300
return config
def snake_case__ ( self, __a):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size()), [])
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = DebertaModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Dict = model(__a, attention_mask=__a, token_type_ids=__a)[0]
_lowerCAmelCase : Tuple = model(__a, token_type_ids=__a)[0]
_lowerCAmelCase : str = model(__a)[0]
self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = DebertaForMaskedLM(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : int = model(__a, attention_mask=__a, token_type_ids=__a, labels=__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : Tuple = DebertaForSequenceClassification(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : str = model(__a, attention_mask=__a, token_type_ids=__a, labels=__a)
self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
self.check_loss_output(__a)
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : Tuple = DebertaForTokenClassification(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Optional[Any] = model(__a, attention_mask=__a, token_type_ids=__a, labels=__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = DebertaForQuestionAnswering(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Tuple = model(
__a, attention_mask=__a, token_type_ids=__a, start_positions=__a, end_positions=__a, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = DebertaModelTester(self)
_lowerCAmelCase : Optional[int] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = DebertaModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase):
@unittest.skip(reason="Model not available yet")
def snake_case__ ( self):
'''simple docstring'''
pass
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = DebertaModel.from_pretrained("microsoft/deberta-base")
_lowerCAmelCase : str = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]])
_lowerCAmelCase : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
_lowerCAmelCase : Dict = model(__a, attention_mask=__a)[0]
# compare the actual values for a slice.
_lowerCAmelCase : List[Any] = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], __a, atol=1E-4), f"{output[:, 1:4, 1:4]}")
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
| 658 | 1 |
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        '''k: Harris free parameter, usually in the interval [0.04, 0.06].'''
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")
    def __str__(self):
        return str(self.k)
    def detect(self, img_path: str):
        '''Return the image with detected corners marked in red plus the corner list.'''
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # local response constant used below (self.k is only validated in __init__)
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
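# Background sketch: per window the Harris response is R = det(M) - k * trace(M)^2,
# where M is the 2x2 structure tensor built from the summed gradient products
# (wxx, wxy, wyy) above; large positive R marks a corner, negative R an edge,
# and |R| near zero a flat region.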
| 658 |
def combination_sum_iv(n, array, target):
    '''Count ordered combinations of `array` elements (with repetition) summing to `target`, by plain recursion.'''
    # n is unused here; kept for a uniform signature across the three variants
    def count_of_possible_combinations(target) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n, array, target):
    '''Same count, memoized top-down with a dp array.'''
    def count_of_possible_combinations_with_dp_array(target, dp_array) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n, array, target):
    '''Same count, iterative bottom-up dynamic programming.'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
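# Worked bottom-up table for target=5, array=[1, 2, 5]:
# dp = [1, 1, 2, 3, 5, 9], so all three variants return 9 ordered combinations.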
| 658 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["YolosFeatureExtractor"]
_snake_case = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 658 |
import string
def A ( _lowerCamelCase ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
_lowerCAmelCase : str = ""
for symbol in message:
if symbol in string.ascii_uppercase:
_lowerCAmelCase : List[str] = string.ascii_uppercase.find(_lowerCamelCase )
_lowerCAmelCase : Dict = num - key
if num < 0:
_lowerCAmelCase : Dict = num + len(string.ascii_uppercase )
_lowerCAmelCase : Optional[Any] = translated + string.ascii_uppercase[num]
else:
_lowerCAmelCase : int = translated + symbol
print(F"Decryption using Key #{key}: {translated}" )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = input("Encrypted message: " )
_lowerCAmelCase : Dict = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 658 | 1 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase_ :
def __init__( self, __a, __a=2, __a=32, __a=16, __a=3, __a=True, __a=True, __a=32, __a=4, __a=[0, 1, 2, 3], __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=0.02, __a=3, __a=[1, 384, 24, 24], __a=True, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Optional[int] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Tuple = use_labels
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : int = backbone_out_indices
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Dict = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Any = num_labels
_lowerCAmelCase : Union[str, Any] = backbone_featmap_shape
_lowerCAmelCase : List[str] = scope
_lowerCAmelCase : str = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Any = (image_size // patch_size) ** 2
_lowerCAmelCase : Optional[int] = num_patches + 1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__a, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=__a, backbone_featmap_shape=self.backbone_featmap_shape, )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = DPTModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Tuple = DPTForDepthEstimation(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : int = model(__a)
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_labels
_lowerCAmelCase : str = DPTForSemanticSegmentation(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Tuple = model(__a, labels=__a)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = config_and_inputs
_lowerCAmelCase : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = DPTModelTester(self)
_lowerCAmelCase : int = ConfigTester(self, config_class=__a, has_text_modality=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(__a)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a, nn.Linear))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(__a)
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Tuple = [*signature.parameters.keys()]
_lowerCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a)
def snake_case__ ( self):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = True
if model_class in get_values(__a):
continue
_lowerCAmelCase : Any = model_class(__a)
model.to(__a)
model.train()
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(__a, __a, return_labels=__a)
_lowerCAmelCase : int = model(**__a).loss
loss.backward()
def snake_case__ ( self):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = False
_lowerCAmelCase : Any = True
if model_class in get_values(__a) or not model_class.supports_gradient_checkpointing:
continue
_lowerCAmelCase : Optional[Any] = model_class(__a)
model.to(__a)
model.gradient_checkpointing_enable()
model.train()
_lowerCAmelCase : List[str] = self._prepare_for_class(__a, __a, return_labels=__a)
_lowerCAmelCase : List[Any] = model(**__a).loss
loss.backward()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Union[str, Any] = _config_zero_init(__a)
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(config=__a)
# Skip the check for the backbone
_lowerCAmelCase : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_lowerCAmelCase : Dict = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def snake_case__ ( self):
'''simple docstring'''
pass
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_lowerCAmelCase : List[Any] = DPTModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = "add"
with self.assertRaises(__a):
_lowerCAmelCase : Dict = DPTForDepthEstimation(__a)
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
_lowerCAmelCase : Optional[int] = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(__a)
_lowerCAmelCase : Dict = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=__a, return_tensors="pt").to(__a)
# forward pass
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**__a)
_lowerCAmelCase : Optional[Any] = outputs.predicted_depth
# verify the predicted depth
_lowerCAmelCase : List[str] = torch.Size((1, 384, 384))
self.assertEqual(predicted_depth.shape, __a)
_lowerCAmelCase : Tuple = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]]).to(__a)
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, __a, atol=1E-4))
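# Standalone inference sketch mirroring the slow integration test above
# (illustrative only; assumes the same "Intel/dpt-hybrid-midas" checkpoint
# and a PIL image bound to `image`):
#
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
#   model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       depth = model(**inputs).predicted_depth  # (1, 384, 384) for this checkpoint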
| 658 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats( url = "https://www.worldometers.info/coronavirus" ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    keys = soup.findAll("h1" )
    values = soup.findAll("div" , {"class": "maincounter-number"} )
    keys += soup.findAll("span" , {"class": "panel-title"} )
    values += soup.findAll("div" , {"class": "number-table-main"} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( a):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = tempfile.mkdtemp()
_lowerCAmelCase : Dict = 5
# Realm tok
_lowerCAmelCase : List[str] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname, "realm_tokenizer")
os.makedirs(__a, exist_ok=__a)
_lowerCAmelCase : Dict = os.path.join(__a, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname, "realm_block_records")
os.makedirs(__a, exist_ok=__a)
def snake_case__ ( self):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = RealmConfig(num_block_records=self.num_block_records)
return config
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
})
return dataset
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
], dtype=__a, )
return block_records
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = RealmRetriever(
block_records=self.get_dummy_block_records(), tokenizer=self.get_tokenizer(), )
return retriever
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_config()
_lowerCAmelCase : List[Any] = self.get_dummy_retriever()
_lowerCAmelCase : Union[str, Any] = retriever.tokenizer
_lowerCAmelCase : Tuple = np.array([0, 3], dtype="long")
_lowerCAmelCase : str = tokenizer(["Test question"]).input_ids
_lowerCAmelCase : Tuple = tokenizer(
["the fourth"], add_special_tokens=__a, return_token_type_ids=__a, return_attention_mask=__a, ).input_ids
_lowerCAmelCase : List[str] = config.reader_seq_len
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = retriever(
__a, __a, answer_ids=__a, max_length=__a, return_tensors="np")
self.assertEqual(len(__a), 2)
self.assertEqual(len(__a), 2)
self.assertEqual(len(__a), 2)
self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]), ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"], )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]), ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"], )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.get_config()
_lowerCAmelCase : Tuple = self.get_dummy_retriever()
_lowerCAmelCase : str = retriever.tokenizer
_lowerCAmelCase : List[Any] = np.array([0, 3, 5], dtype="long")
_lowerCAmelCase : Optional[Any] = tokenizer(["Test question"]).input_ids
_lowerCAmelCase : List[str] = tokenizer(
["the fourth", "longer longer"], add_special_tokens=__a, return_token_type_ids=__a, return_attention_mask=__a, ).input_ids
_lowerCAmelCase : int = config.reader_seq_len
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = retriever(
__a, __a, answer_ids=__a, max_length=__a, return_tensors="np")
self.assertEqual([False, True, True], __a)
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], __a)
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
# Test local path
_lowerCAmelCase : str = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
self.assertEqual(retriever.block_records[0], B"This is the first record")
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
_lowerCAmelCase : Dict = os.path.join(
os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME)
_lowerCAmelCase : Any = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
self.assertEqual(retriever.block_records[0], B"This is the first record")
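# Call pattern exercised by the two tests above (a sketch; the ordering is
# inferred from their assertions): when `answer_ids` is supplied, the
# retriever returns a 4-tuple of per-block answer flags, start positions,
# end positions, and the concatenated question+block encodings.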
| 658 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
def __init__( self, __a, __a):
'''simple docstring'''
if len(__a) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1.")
_lowerCAmelCase : list[float] = list(__a)
_lowerCAmelCase : Any = degree
def __add__( self, __a):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_lowerCAmelCase : Dict = self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree, __a)
else:
_lowerCAmelCase : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree, __a)
def __sub__( self, __a):
'''simple docstring'''
return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self):
'''simple docstring'''
return Polynomial(self.degree, [-c for c in self.coefficients])
def __mul__( self, __a):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(__a)
return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * self.degree
for i in range(self.degree):
_lowerCAmelCase : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, __a)
def snake_case__ ( self, __a = 0):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + 2)
_lowerCAmelCase : Optional[Any] = constant
for i in range(self.degree + 1):
_lowerCAmelCase : Dict = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, __a)
def __eq__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self, __a):
'''simple docstring'''
return not self.__eq__(__a)
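# Illustrative usage (a sketch of the intended behavior; coefficients[i]
# stores the x**i coefficient, as the evaluation loop above shows, so
# [1, 2, 3] with degree 2 represents 3x^2 + 2x + 1):
#
#   p = Polynomial(2, [1, 2, 3])
#   q = Polynomial(2, [1, 2, 3])
#   print(p + q)    # "6x^2 + 4x + 2"
#   print(p * q)    # degree-4 product built by the double loop in __mul__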
| 658 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    '''simple docstring'''
    config = FunnelConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
_snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
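# Example invocation (illustrative; all paths below are placeholders):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin \
#       --base_model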
| 658 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=3_2000, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=True, __a="bi", __a=0.02, __a=1E-12, __a=0.1, __a=512, __a=None, __a=True, __a=False, __a=False, __a=-1, __a=False, __a="last", __a=True, __a="tanh", __a=0.1, __a=5, __a=5, __a=5, __a=1, __a=2, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Tuple = n_layer
_lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
_lowerCAmelCase : Optional[int] = d_model // n_head
_lowerCAmelCase : List[str] = ff_activation
_lowerCAmelCase : Tuple = d_inner
_lowerCAmelCase : List[Any] = untie_r
_lowerCAmelCase : List[str] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = dropout
_lowerCAmelCase : Optional[int] = mem_len
_lowerCAmelCase : Union[str, Any] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : List[str] = clamp_len
_lowerCAmelCase : Any = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : Optional[Any] = summary_activation
_lowerCAmelCase : Tuple = summary_last_dropout
_lowerCAmelCase : Union[str, Any] = start_n_top
_lowerCAmelCase : Optional[int] = end_n_top
_lowerCAmelCase : Tuple = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def snake_case__ ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
try:
with open(_lowerCamelCase , "rb" ) as flax_state_f:
_lowerCAmelCase : Union[str, Any] = from_bytes(_lowerCamelCase , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(_lowerCamelCase ) as f:
if f.read().startswith("version" ):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please"
" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
" folder you cloned." )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
_lowerCAmelCase : Dict = flatten_dict(jax.tree_util.tree_map(lambda _lowerCamelCase : x.dtype == jnp.bfloataa , _lowerCamelCase ) ).values()
if any(_lowerCamelCase ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
_lowerCAmelCase : Union[str, Any] = jax.tree_util.tree_map(
lambda _lowerCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _lowerCamelCase )
_lowerCAmelCase : Tuple = ""
_lowerCAmelCase : Tuple = flatten_dict(_lowerCamelCase , sep="." )
_lowerCAmelCase : Any = pt_model.state_dict()
# keep track of unexpected & missing keys
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : List[str] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_lowerCAmelCase : int = flax_key_tuple.split("." )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
_lowerCAmelCase : str = flax_key_tuple_array[:-1] + ["weight"]
_lowerCAmelCase : Any = jnp.transpose(_lowerCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
_lowerCAmelCase : str = flax_key_tuple_array[:-1] + ["weight"]
_lowerCAmelCase : str = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
_lowerCAmelCase : Tuple = flax_key_tuple_array[:-1] + ["weight"]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = (
flax_key_tuple_string.replace("_0" , ".0" )
.replace("_1" , ".1" )
.replace("_2" , ".2" )
.replace("_3" , ".3" )
.replace("_4" , ".4" )
.replace("_5" , ".5" )
.replace("_6" , ".6" )
.replace("_7" , ".7" )
.replace("_8" , ".8" )
.replace("_9" , ".9" )
)
_lowerCAmelCase : Any = ".".join(_lowerCamelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
_lowerCAmelCase : int = np.asarray(_lowerCamelCase ) if not isinstance(_lowerCamelCase , np.ndarray ) else flax_tensor
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase )
# remove from missing keys
missing_keys.remove(_lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_lowerCamelCase )
pt_model.load_state_dict(_lowerCamelCase )
# re-transform missing_keys to list
_lowerCAmelCase : Any = list(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
if len(_lowerCamelCase ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
" use it for predictions and inference." )
return pt_model
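# Note: both module-level helpers above share the obfuscated name `A`, so the
# second definition shadows the first. The first helper deserializes a msgpack
# Flax checkpoint from disk and delegates to the second, which transposes 4-D
# conv kernels into PyTorch's layout, renames `kernel`/`scale` entries to
# `weight`, and loads the result into the PyTorch state dict while tracking
# missing and unexpected keys.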
| 658 |
def price_plus_tax( price , tax_rate ):
    '''simple docstring'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
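# Worked arithmetic for the demo calls above:
#   100.00 * (1 + 0.25) = 125.0
#   125.50 * (1 + 0.05) = 131.775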
| 658 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self, __a, __a=13, __a=30, __a=2, __a=3, __a=True, __a=True, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=10, __a=0.02, ):
'''simple docstring'''
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : Dict = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : List[Any] = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : Optional[int] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Dict = (image_size // patch_size) ** 2
_lowerCAmelCase : List[Any] = num_patches + 1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : List[Any] = ViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__a, initializer_range=self.initializer_range, )
return config, pixel_values
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = FlaxViTModel(config=__a)
_lowerCAmelCase : Dict = model(__a)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Optional[int] = (self.image_size, self.image_size)
_lowerCAmelCase : Any = (self.patch_size, self.patch_size)
_lowerCAmelCase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.type_sequence_label_size
_lowerCAmelCase : Optional[Any] = FlaxViTForImageClassification(config=__a)
_lowerCAmelCase : Union[str, Any] = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
# test greyscale images
_lowerCAmelCase : Any = 1
_lowerCAmelCase : List[str] = FlaxViTForImageClassification(__a)
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
_lowerCAmelCase : str = model(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase : Optional[int] = config_and_inputs
_lowerCAmelCase : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = FlaxViTModelTester(self)
_lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, has_text_modality=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(__a)
_lowerCAmelCase : Optional[Any] = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_lowerCAmelCase : int = self._prepare_for_class(__a, __a)
_lowerCAmelCase : Union[str, Any] = model_class(__a)
@jax.jit
def model_jitted(__a, **__a):
return model(pixel_values=__a, **__a)
with self.subTest("JIT Enabled"):
_lowerCAmelCase : str = model_jitted(**__a).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
_lowerCAmelCase : Union[str, Any] = model_jitted(**__a).to_tuple()
self.assertEqual(len(__a), len(__a))
for jitted_output, output in zip(__a, __a):
self.assertEqual(jitted_output.shape, output.shape)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class_name.from_pretrained("google/vit-base-patch16-224")
_lowerCAmelCase : Union[str, Any] = model(np.ones((1, 3, 224, 224)))
self.assertIsNotNone(__a)
| 658 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
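# The serialization override above follows the usual composite-config pattern:
# deep-copy the instance dict, expand the nested backbone config via its own
# to_dict(), and stamp the top-level model_type so the config can round-trip
# through from_dict().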
| 658 | 1 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_snake_case = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
def get_setup_file( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def get_results( output_dir , split="eval" ):
    '''simple docstring'''
    path = os.path.join(output_dir , F"{split}_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            return json.load(f )
    raise ValueError(F"can't find {path}" )
_snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( a):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : List[Any] = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split()
with patch.object(__a, "argv", __a):
run_flax_glue.main()
_lowerCAmelCase : List[str] = get_results(__a)
self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Optional[Any] = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(__a, "argv", __a):
run_clm_flax.main()
_lowerCAmelCase : int = get_results(__a)
self.assertLess(result["eval_perplexity"], 100)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Tuple = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split()
with patch.object(__a, "argv", __a):
run_summarization_flax.main()
_lowerCAmelCase : Optional[int] = get_results(__a, split="test")
self.assertGreaterEqual(result["test_rouge1"], 10)
self.assertGreaterEqual(result["test_rouge2"], 2)
self.assertGreaterEqual(result["test_rougeL"], 7)
self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Optional[int] = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split()
with patch.object(__a, "argv", __a):
run_mlm_flax.main()
_lowerCAmelCase : Dict = get_results(__a)
self.assertLess(result["eval_perplexity"], 42)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : int = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split()
with patch.object(__a, "argv", __a):
run_ta_mlm_flax.main()
_lowerCAmelCase : Optional[int] = get_results(__a)
self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = 7 if get_gpu_count() > 1 else 2
_lowerCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : int = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split()
with patch.object(__a, "argv", __a):
run_flax_ner.main()
_lowerCAmelCase : Optional[int] = get_results(__a)
self.assertGreaterEqual(result["eval_accuracy"], 0.75)
self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Dict = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split()
with patch.object(__a, "argv", __a):
run_qa.main()
_lowerCAmelCase : int = get_results(__a)
self.assertGreaterEqual(result["eval_f1"], 30)
self.assertGreaterEqual(result["eval_exact"], 30)
| 658 |
import base64
def base85_encode( string ):
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8" ) )
def base85_decode( a_string ):
    '''simple docstring'''
    return base64.a85decode(a_string ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
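# Round-trip sanity check using the stdlib calls directly:
#   import base64
#   assert base64.a85decode(base64.a85encode(b"hello")).decode("utf-8") == "hello"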
| 658 | 1 |
_snake_case = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 658 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
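# The ONNX export config above declares a single dynamic-axis input
# ("pixel_values" over batch/num_channels/height/width); the
# `version.parse('1.11')` attribute corresponds to the minimum supported
# torch version upstream, and the trailing 1e-4 is the absolute tolerance
# used to validate exported outputs (`atol_for_validation` in the upstream
# OnnxConfig).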
| 658 | 1 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase_ :
def __init__( self, __a, __a, __a, __a, __a, __a=0.2, __a=0.2):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = bp_numa
_lowerCAmelCase : Union[str, Any] = bp_numa
_lowerCAmelCase : str = bp_numa
_lowerCAmelCase : int = conva_get[:2]
_lowerCAmelCase : Optional[int] = conva_get[2]
_lowerCAmelCase : List[Any] = size_pa
_lowerCAmelCase : str = rate_w
_lowerCAmelCase : Any = rate_t
_lowerCAmelCase : Dict = [
np.mat(-1 * np.random.rand(self.conva[0], self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_lowerCAmelCase : Tuple = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa) + 0.5)
_lowerCAmelCase : Dict = np.mat(-1 * np.random.rand(self.num_bpa, self.num_bpa) + 0.5)
_lowerCAmelCase : List[Any] = -2 * np.random.rand(self.conva[1]) + 1
_lowerCAmelCase : str = -2 * np.random.rand(self.num_bpa) + 1
_lowerCAmelCase : List[str] = -2 * np.random.rand(self.num_bpa) + 1
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__a, "wb") as f:
pickle.dump(__a, __a)
print(f"Model saved: {save_path}")
@classmethod
def snake_case__ ( cls, __a):
'''simple docstring'''
with open(__a, "rb") as f:
_lowerCAmelCase : Any = pickle.load(__a) # noqa: S301
_lowerCAmelCase : Tuple = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
_lowerCAmelCase : int = model_dic.get("size_pooling1")
_lowerCAmelCase : Optional[Any] = model_dic.get("num_bp1")
_lowerCAmelCase : Optional[int] = model_dic.get("num_bp2")
_lowerCAmelCase : Any = model_dic.get("num_bp3")
_lowerCAmelCase : Dict = model_dic.get("rate_weight")
_lowerCAmelCase : int = model_dic.get("rate_thre")
# create model instance
_lowerCAmelCase : int = CNN(__a, __a, __a, __a, __a, __a, __a)
# modify model parameter
_lowerCAmelCase : Tuple = model_dic.get("w_conv1")
_lowerCAmelCase : Optional[int] = model_dic.get("wkj")
_lowerCAmelCase : List[str] = model_dic.get("vji")
_lowerCAmelCase : List[Any] = model_dic.get("thre_conv1")
_lowerCAmelCase : Union[str, Any] = model_dic.get("thre_bp2")
_lowerCAmelCase : List[Any] = model_dic.get("thre_bp3")
return conv_ins
def snake_case__ ( self, __a):
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x))
def snake_case__ ( self, __a):
'''simple docstring'''
return round(__a, 3)
def snake_case__ ( self, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = convs[0]
_lowerCAmelCase : Union[str, Any] = convs[1]
_lowerCAmelCase : Optional[int] = np.shape(__a)[0]
# get the data slice of original image data, data_focus
_lowerCAmelCase : Optional[Any] = []
for i_focus in range(0, size_data - size_conv + 1, __a):
for j_focus in range(0, size_data - size_conv + 1, __a):
_lowerCAmelCase : Tuple = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__a)
        # calculate the feature map of every single kernel, and save them as a list of matrices
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : List[str] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(__a):
_lowerCAmelCase : List[Any] = []
for i_focus in range(len(__a)):
_lowerCAmelCase : List[Any] = (
np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(__a))
_lowerCAmelCase : List[str] = np.asmatrix(__a).reshape(
__a, __a)
data_featuremap.append(__a)
        # expanding the data slice to one dimension
_lowerCAmelCase : Any = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__a))
_lowerCAmelCase : List[str] = np.asarray(__a)
return focus_list, data_featuremap
def snake_case__ ( self, __a, __a, __a="average_pool"):
'''simple docstring'''
_lowerCAmelCase : str = len(featuremaps[0])
_lowerCAmelCase : int = int(size_map / size_pooling)
_lowerCAmelCase : List[Any] = []
for i_map in range(len(__a)):
_lowerCAmelCase : int = featuremaps[i_map]
_lowerCAmelCase : List[Any] = []
for i_focus in range(0, __a, __a):
for j_focus in range(0, __a, __a):
_lowerCAmelCase : Any = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__a))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__a))
_lowerCAmelCase : Union[str, Any] = np.asmatrix(__a).reshape(__a, __a)
featuremap_pooled.append(__a)
return featuremap_pooled
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for i in range(len(__a)):
_lowerCAmelCase : str = np.shape(data[i])
_lowerCAmelCase : List[str] = data[i].reshape(1, shapes[0] * shapes[1])
_lowerCAmelCase : Any = data_listed.getA().tolist()[0]
data_expanded.extend(__a)
_lowerCAmelCase : Optional[int] = np.asarray(__a)
return data_expanded
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = np.asarray(__a)
_lowerCAmelCase : int = np.shape(__a)
_lowerCAmelCase : Union[str, Any] = data_mat.reshape(1, shapes[0] * shapes[1])
return data_expanded
def snake_case__ ( self, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : str = 0
for i_map in range(__a):
_lowerCAmelCase : List[Any] = np.ones((size_map, size_map))
for i in range(0, __a, __a):
for j in range(0, __a, __a):
_lowerCAmelCase : int = pd_pool[
i_pool
]
_lowerCAmelCase : Any = i_pool + 1
_lowerCAmelCase : Tuple = np.multiply(
__a, np.multiply(out_map[i_map], (1 - out_map[i_map])))
pd_all.append(__a)
return pd_all
def snake_case__ ( self, __a, __a, __a, __a, __a, __a=bool):
'''simple docstring'''
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(__a)))
print((" - - Shape: Teach_Data ", np.shape(__a)))
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : Dict = []
_lowerCAmelCase : int = 1_0000
while rp < n_repeat and mse >= error_accuracy:
_lowerCAmelCase : List[Any] = 0
print(f"-------------Learning Time {rp}--------------")
for p in range(len(__a)):
# print('------------Learning Image: %d--------------'%p)
_lowerCAmelCase : Optional[Any] = np.asmatrix(datas_train[p])
_lowerCAmelCase : Optional[int] = np.asarray(datas_teach[p])
_lowerCAmelCase , _lowerCAmelCase : str = self.convolute(
__a, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
_lowerCAmelCase : int = self.pooling(__a, self.size_poolinga)
_lowerCAmelCase : Tuple = np.shape(__a)
_lowerCAmelCase : Optional[Any] = self._expand(__a)
_lowerCAmelCase : List[Any] = data_bp_input
_lowerCAmelCase : str = np.dot(__a, self.vji.T) - self.thre_bpa
_lowerCAmelCase : List[Any] = self.sig(__a)
_lowerCAmelCase : List[str] = np.dot(__a, self.wkj.T) - self.thre_bpa
_lowerCAmelCase : Any = self.sig(__a)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
_lowerCAmelCase : Dict = np.multiply(
(data_teach - bp_outa), np.multiply(__a, (1 - bp_outa)))
_lowerCAmelCase : int = np.multiply(
np.dot(__a, self.wkj), np.multiply(__a, (1 - bp_outa)))
_lowerCAmelCase : Union[str, Any] = np.dot(__a, self.vji)
_lowerCAmelCase : int = pd_i_all / (self.size_poolinga * self.size_poolinga)
_lowerCAmelCase : int = pd_conva_pooled.T.getA().tolist()
_lowerCAmelCase : int = self._calculate_gradient_from_pool(
__a, __a, shape_featuremapa[0], shape_featuremapa[1], self.size_poolinga, )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_lowerCAmelCase : Dict = self._expand_mat(pd_conva_all[k_conv])
_lowerCAmelCase : Union[str, Any] = self.rate_weight * np.dot(__a, __a)
_lowerCAmelCase : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_lowerCAmelCase : List[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
_lowerCAmelCase : Dict = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_lowerCAmelCase : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_lowerCAmelCase : int = self.thre_bpa - pd_k_all * self.rate_thre
_lowerCAmelCase : int = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error for this single image
_lowerCAmelCase : Optional[int] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_lowerCAmelCase : Union[str, Any] = rp + 1
_lowerCAmelCase : Optional[int] = error_count / patterns
all_mse.append(__a)
def draw_error():
_lowerCAmelCase : Any = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(__a, "+-")
plt.plot(__a, "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(__a, alpha=0.5)
plt.show()
print("------------------Training Complished---------------------")
print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(__a)))
for p in range(len(__a)):
_lowerCAmelCase : Tuple = np.asmatrix(datas_test[p])
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.convolute(
__a, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
_lowerCAmelCase : Optional[Any] = self.pooling(__a, self.size_poolinga)
_lowerCAmelCase : Optional[Any] = self._expand(__a)
_lowerCAmelCase : List[Any] = data_bp_input
_lowerCAmelCase : str = bp_outa * self.vji.T - self.thre_bpa
_lowerCAmelCase : List[Any] = self.sig(__a)
_lowerCAmelCase : Any = bp_outa * self.wkj.T - self.thre_bpa
_lowerCAmelCase : Optional[int] = self.sig(__a)
produce_out.extend(bp_outa.getA().tolist())
_lowerCAmelCase : str = [list(map(self.do_round, __a)) for each in produce_out]
return np.asarray(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = np.asmatrix(__a)
_lowerCAmelCase , _lowerCAmelCase : int = self.convolute(
__a, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
_lowerCAmelCase : List[str] = self.pooling(__a, self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
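# Shape walk-through for the forward pass above (sketch): a 2-D input of side
# S convolved with a k x k kernel at stride s yields feature maps of side
# (S - k) / s + 1; average pooling with window w then divides that side by w,
# and the pooled maps are flattened into the input vector of the BP layers.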
| 658 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    '''simple docstring'''
    config = XLNetConfig.from_json_file(bert_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {os.path.abspath(pytorch_config_dump_path )}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
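# Head-selection sketch: the same rule the converter above applies, written as a
# small standalone helper over the GLUE_TASKS_NUM_LABELS table.
def pick_head(finetuning_task):
    task = finetuning_task.lower() if finetuning_task is not None else ""
    if task in GLUE_TASKS_NUM_LABELS:
        return "sequence-classification"
    if "squad" in task:
        return "question-answering"
    return "lm-head"
assert pick_head("STS-B") == "sequence-classification"
assert pick_head("squad2.0") == "question-answering"
assert pick_head(None) == "lm-head"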
| 658 | 1 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_snake_case = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray , max_length: float , sample_rate: int = 16_000 ):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
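# Example (illustrative numbers): a 3 s clip sampled at 16 kHz is cut down to a
# random 1 s window, i.e. random_subsample(np.zeros(48_000), max_length=1.0)
# returns an array of shape (16_000,).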
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(default=a , metadata={'help': 'Name of a dataset from the datasets package'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'A file containing the training audio paths and labels.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'A file containing the validation audio paths and labels.'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
lowerCamelCase__ = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
lowerCamelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Name or path of preprocessor config.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def snake_case__ ( self):
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", __a, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`.")
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification" , model_args , data_args )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--label_column_name` to the correct text column - one of "
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )
        return output_batch
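    # Both transforms emit {model_input_name: ..., "labels": ...}; the train
    # transform additionally crops each clip to `max_length_seconds` first.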
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_lowerCAmelCase : Optional[int] = raw_datasets["train"].features[data_args.label_column_name].names
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = {}, {}
for i, label in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = str(_lowerCamelCase )
_lowerCAmelCase : List[Any] = label
# Load the accuracy metric from the datasets package
_lowerCAmelCase : Optional[int] = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        """Computes accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )
if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 658 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        '''simple docstring'''
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
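# Sketch of the brevity-penalty term reported above; this is the standard BLEU
# definition, written independently of nmt_bleu's internals.
import math
def brevity_penalty(translation_length, reference_length):
    # no penalty when the candidate is at least as long as the reference
    if translation_length > reference_length:
        return 1.0
    return math.exp(1 - reference_length / translation_length)
assert brevity_penalty(10, 7) == 1.0
assert 0.0 < brevity_penalty(7, 10) < 1.0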
| 658 | 1 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
_snake_case = 111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
_snake_case = 0
_snake_case = 0Xe_0_0_0
_snake_case = 0Xe_0_0_1
_snake_case = 0Xe_0_0_2
_snake_case = 0Xe_0_0_3
_snake_case = 0Xe_0_0_4
# Maps special codepoints to human-readable names.
_snake_case = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
_snake_case = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs, )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self):
        '''simple docstring'''
        return self._unicode_vocab_size
    def _tokenize(self, text):
        '''simple docstring'''
        return list(text)
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")
    def _convert_id_to_token(self, index):
        '''simple docstring'''
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        return "".join(tokens)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1 = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask(self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result
    def save_vocabulary(self, save_directory, filename_prefix = None):
        '''simple docstring'''
        return ()
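# Round-trip sketch of the codepoint vocabulary above: a token is one character
# and its id is the Unicode codepoint, so ord() and chr() invert each other.
assert ord("h") == 104 and chr(104) == "h"
assert "".join(chr(ord(c)) for c in "hello") == "hello"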
| 658 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path , display=False ):
    '''simple docstring'''
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan(device , conf_path=None , ckpt_path=None ):
    '''simple docstring'''
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan(x , model ):
    '''simple docstring'''
    z, _, _ = model.encode(x )
    print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str(string , reload=False ):
    '''simple docstring'''
    module, cls = string.rsplit("." , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config(config ):
    '''simple docstring'''
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate." )
    return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def load_model_from_config(config , sd , gpu=True , eval_mode=True ):
    '''simple docstring'''
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config , ckpt , gpu , eval_mode ):
    '''simple docstring'''
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(F"loaded model from global step {global_step}." )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=gpu , eval_mode=eval_mode )["model"]
    return model, global_step
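# Usage sketch for get_obj_from_str / instantiate_from_config above; the target
# string (collections.OrderedDict) is an illustrative stdlib choice.
_cfg = {"target": "collections.OrderedDict", "params": {}}
assert type(instantiate_from_config(_cfg)) is get_obj_from_str(_cfg["target"])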
| 658 | 1 |
def binomial_coefficient(n , r ):
    '''simple docstring'''
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
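# The one-dimensional array above holds a single row of Pascal's triangle and is
# updated in place, which is why `j` runs downwards. For example C(10, 5) = 252:
assert binomial_coefficient(n=10, r=5) == 252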
print(binomial_coefficient(n=10, r=5))
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"
    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=2_4858,
        concat_input=True,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
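# Construction sketch: the defaults above mirror bert-base and add the
# pronunciation/shape channels, e.g. RoCBertConfig().pronunciation_embed_dim == 768
# and RoCBertConfig().shape_embed_dim == 512.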
| 658 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def replace_key_with_offset(key , offset , original_name , new_name ):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split("." )[0]
    key_list = key.split("." )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(F"{orig_block_num}.{layer_num}.{original_name}" , F"block.{new_block_num}.{layer_num}.{new_name}" )
    return key
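# Worked example (traced from the function above):
#   replace_key_with_offset("network.1.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#   returns "network.block.0.0.output.conv1.weight"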
def rename_keys(state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network" ):
            key = key.replace("network" , "poolformer.encoder" )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias" ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj" )]
            key = key.replace(to_replace , F"patch_embeddings.{total_embed_found}." )
            key = key.replace("proj" , "projection" )
            if key.endswith("bias" ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "mlp.fc1" , "output.conv1" )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "mlp.fc2" , "output.conv2" )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "norm1" , "before_norm" )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "norm2" , "after_norm" )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "layer_scale_1" , "layer_scale_1" )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , "layer_scale_2" , "layer_scale_2" )
        if "head" in key:
            key = key.replace("head" , "classifier" )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1_000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1_000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(F"Size {size} not supported" )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values
    logger.info(F"Converting model {model_name}..." )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.30_45, -0.67_58, -0.48_69] )
    elif size == "s24":
        expected_slice = torch.tensor([0.44_02, -0.13_74, -0.80_45] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.60_80, -0.51_33, -0.58_98] )
    elif size == "m36":
        expected_slice = torch.tensor([0.39_52, 0.22_63, -1.26_68] )
    elif size == "m48":
        expected_slice = torch.tensor([0.11_67, -0.06_56, -0.34_23] )
    else:
        raise ValueError(F"Size {size} not supported" )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-2 )
    # finally, save model and image processor
    logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
_snake_case = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 658 |
from __future__ import annotations
def mean(nums ):
    '''simple docstring'''
    if not nums:
        raise ValueError("List is empty" )
    return sum(nums ) / len(nums )
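# Examples: mean([3, 6, 9]) == 6.0, while mean([]) raises ValueError.
assert mean([3, 6, 9]) == 6.0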
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"
    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
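# Note: to_dict() above also serialises the nested backbone config, so the
# result of UperNetConfig().to_dict() round-trips cleanly through JSON.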
| 658 |
def hexagonal_numbers(length ):
    '''simple docstring'''
    if length <= 0 or not isinstance(length , int ):
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(length )]
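# h(n) = n * (2 * n - 1), starting from n = 0:
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]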
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 658 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array ):
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes(features , labels , classes ):
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes(features , labels , classes ):
    '''simple docstring'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features , dimensions ):
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info("Principal Component Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def linear_discriminant_analysis(features , labels , classes , dimensions ):
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
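# In matrix terms, the eigh(...) call above solves the generalized eigenproblem
#   S_b v = lambda * S_w v
# where S_b / S_w are the between- / within-class scatter estimates, and keeps
# the `dimensions` leading eigenvectors.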
def test_linear_discriminant_analysis():
    '''simple docstring'''
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def test_principal_component_analysis():
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
import math
def insertion_sort(array , start = 0 , end = 0 ):
    '''simple docstring'''
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array , index , heap_size ): # Max Heap
    '''simple docstring'''
    largest = index
    left_index = 2 * index + 1 # Left Node
    right_index = 2 * index + 2 # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )
def heap_sort(array ):
    '''simple docstring'''
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[i], array[0] = array[0], array[i]
        heapify(array , 0 , i )
    return array
def median_of_3(array , first_index , middle_index , last_index ):
    '''simple docstring'''
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array , low , high , pivot ):
    '''simple docstring'''
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array ):
    '''simple docstring'''
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 16
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )
def intro_sort(array , start , end , size_threshold , max_depth ):
    '''simple docstring'''
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
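# Quick self-check (illustrative input): introsort must agree with sorted().
_example = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
assert sort(list(_example)) == sorted(_example)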
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = input("Enter numbers separated by a comma : ").strip()
_snake_case = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 658 |
import requests
from bsa import BeautifulSoup
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , "html.parser" )
_lowerCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_lowerCAmelCase : str = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 1 |
_snake_case = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
_snake_case = frozenset(["prompt", "negative_prompt"])
_snake_case = frozenset([])
_snake_case = frozenset(["image"])
_snake_case = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
_snake_case = frozenset(["image"])
_snake_case = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
_snake_case = frozenset(["prompt", "image", "negative_prompt"])
_snake_case = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
_snake_case = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
_snake_case = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
_snake_case = frozenset(["image", "mask_image"])
_snake_case = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
_snake_case = frozenset(["example_image", "image", "mask_image"])
_snake_case = frozenset(["class_labels"])
_snake_case = frozenset(["class_labels"])
_snake_case = frozenset(["batch_size"])
_snake_case = frozenset([])
_snake_case = frozenset(["batch_size"])
_snake_case = frozenset([])
_snake_case = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
_snake_case = frozenset(["prompt", "negative_prompt"])
_snake_case = frozenset(["input_tokens"])
_snake_case = frozenset(["input_tokens"])
| 658 |
def solution(num = 1_000_000 ):
    '''simple docstring'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2 , num ):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
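# The memoised chain length counts the starting term as well, e.g. for 13:
#   13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1   (10 terms)
# so counters[13] == 10 once it has been computed.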
if __name__ == "__main__":
print(solution(int(input().strip())))
| 658 | 1 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
def get_setup_file():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def get_results(output_dir ):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(F"can't find {path}" )
    return results
def is_cuda_and_apex_available():
    '''simple docstring'''
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
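# Each training run below writes `all_results.json` into its --output_dir;
# get_results() loads that file, e.g. get_results(tmp_dir)["eval_accuracy"].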
_snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
@classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
    def tearDownClass(cls):
        '''simple docstring'''
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Union[str, Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
run_command(self._launch_args + testargs)
_lowerCAmelCase : List[Any] = get_results(__a)
self.assertGreaterEqual(result["eval_accuracy"], 0.75)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "glue_no_trainer")))
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Optional[int] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs)
_lowerCAmelCase : Optional[Any] = get_results(__a)
self.assertLess(result["perplexity"], 100)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "clm_no_trainer")))
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : List[str] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : Union[str, Any] = get_results(__a)
self.assertLess(result["perplexity"], 42)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "mlm_no_trainer")))
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
'''simple docstring'''
_lowerCAmelCase : int = 7 if get_gpu_count() > 1 else 2
_lowerCAmelCase : int = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Any = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : List[str] = get_results(__a)
self.assertGreaterEqual(result["eval_accuracy"], 0.75)
self.assertLess(result["train_loss"], 0.5)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "ner_no_trainer")))
@unittest.skip(reason="Fix me @muellerzr")
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Optional[int] = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : Optional[int] = get_results(__a)
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"], 28)
self.assertGreaterEqual(result["eval_exact"], 28)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "qa_no_trainer")))
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Tuple = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : Dict = get_results(__a)
self.assertGreaterEqual(result["eval_accuracy"], 0.8)
self.assertTrue(os.path.exists(os.path.join(__a, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : List[str] = get_results(__a)
self.assertGreaterEqual(result["eval_rouge1"], 10)
self.assertGreaterEqual(result["eval_rouge2"], 2)
self.assertGreaterEqual(result["eval_rougeL"], 7)
self.assertGreaterEqual(result["eval_rougeLsum"], 7)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Tuple = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : List[str] = get_results(__a)
self.assertGreaterEqual(result["eval_bleu"], 30)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "translation_no_trainer")))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(__a)
_lowerCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : int = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : Union[str, Any] = get_results(__a)
self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : List[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
run_command(self._launch_args + testargs)
_lowerCAmelCase : int = get_results(__a)
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"], 0.6)
self.assertTrue(os.path.exists(os.path.join(__a, "step_1")))
self.assertTrue(os.path.exists(os.path.join(__a, "image_classification_no_trainer")))
| 658 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
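# Illustrative invocation of this conversion script (the script filename is
# hypothetical; the argparse defaults above target the 5b-lyrics checkpoint):
#   python convert_jukebox.py \
#       --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted
# The script downloads the vqvae and three prior checkpoints listed in
# MODEL_MAPPING, remaps their keys, and saves a single HF-format model.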
| 658 | 1 |
from sklearn.metrics import recall_score
import datasets
_snake_case = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
_snake_case = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
_snake_case = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32")),
"references": datasets.Sequence(datasets.Value("int32")),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32"),
"references": datasets.Value("int32"),
}), reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"], )
def snake_case__ ( self, __a, __a, __a=None, __a=1, __a="binary", __a=None, __a="warn", ):
'''simple docstring'''
_lowerCAmelCase : Tuple = recall_score(
__a, __a, labels=__a, pos_label=__a, average=__a, sample_weight=__a, zero_division=__a, )
return {"recall": float(__a) if score.size == 1 else score}
| 658 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
_lowerCAmelCase : str = ksize + 1
_lowerCAmelCase : List[str] = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(_lowerCamelCase ):
for x in range(_lowerCamelCase ):
# distance from center
_lowerCAmelCase : int = x - ksize // 2
_lowerCAmelCase : Dict = y - ksize // 2
# degree to radiant
_lowerCAmelCase : List[Any] = theta / 180 * np.pi
_lowerCAmelCase : int = np.cos(_theta )
_lowerCAmelCase : Optional[int] = np.sin(_theta )
# get kernel x
_lowerCAmelCase : int = cos_theta * px + sin_theta * py
# get kernel y
_lowerCAmelCase : str = -sin_theta * px + cos_theta * py
# fill kernel
_lowerCAmelCase : Union[str, Any] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread("../image_data/lena.jpg")
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case = out / out.max() * 255
_snake_case = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
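# A display-free sanity check of the kernel builder above (no GUI needed; the
# __main__ block calls it gabor_filter_kernel): an even ksize is padded to
# ksize + 1, so requesting 10 yields an 11x11 kernel, e.g.
#   gabor_filter_kernel(10, 8, 45, 10, 0, 0).shape == (11, 11)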
| 658 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'rwkv'
lowerCamelCase__ = {'max_position_embeddings': 'context_length'}
def __init__( self, __a=5_0277, __a=1024, __a=4096, __a=32, __a=None, __a=None, __a=1E-5, __a=0, __a=0, __a=6, __a=False, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = context_length
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
_lowerCAmelCase : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size
_lowerCAmelCase : Any = layer_norm_epsilon
_lowerCAmelCase : Tuple = rescale_every
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = bos_token_id
_lowerCAmelCase : Tuple = eos_token_id
super().__init__(
tie_word_embeddings=__a, bos_token_id=__a, eos_token_id=__a, **__a)
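# A minimal instantiation sketch (default sizes, not tied to any released
# checkpoint):
#   config = RwkvConfig(vocab_size=50277, context_length=1024)
# When attention_hidden_size / intermediate_size are left as None they default
# to hidden_size and 4 * hidden_size respectively, as implemented above.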
| 658 |
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
for i in range(1 , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = collection[i]
_lowerCAmelCase : str = 0
_lowerCAmelCase : Union[str, Any] = i - 1
while low <= high:
_lowerCAmelCase : List[str] = (low + high) // 2
if val < collection[mid]:
_lowerCAmelCase : Optional[int] = mid - 1
else:
_lowerCAmelCase : List[str] = mid + 1
for j in range(_lowerCamelCase , _lowerCamelCase , -1 ):
_lowerCAmelCase : int = collection[j - 1]
_lowerCAmelCase : Optional[int] = val
return collection
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
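# Worked example (in this snippet the sort is defined as `A` above):
#   A([5, 2, 4, 1]) -> [1, 2, 4, 5]
# Each element is placed with a binary search over the sorted prefix, then the
# tail is shifted one slot right, giving O(n log n) comparisons but O(n^2)
# moves in the worst case.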
| 658 | 1 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
_snake_case = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
_snake_case = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = set()
_lowerCAmelCase : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Optional[int] = char
_lowerCAmelCase : Dict = set(_lowerCamelCase )
return pairs
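# Worked example of the pair extraction used by the BPE merge loop below
# (defined as `A` in this snippet; the tokenizer calls it as get_pairs):
#   get_pairs("low") -> {("l", "o"), ("o", "w")}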
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self, __a, __a, __a="<s>", __a="</s>", __a="</s>", __a="<s>", __a="<unk>", __a="<pad>", __a="<mask>", **__a, ):
'''simple docstring'''
super().__init__(
bos_token=__a, eos_token=__a, unk_token=__a, sep_token=__a, cls_token=__a, pad_token=__a, mask_token=__a, **__a, )
_lowerCAmelCase : str = vocab_file
_lowerCAmelCase : Dict = merges_file
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Dict = 2
_lowerCAmelCase : Optional[int] = 3
self.add_from_file(__a)
_lowerCAmelCase : List[Any] = {v: k for k, v in self.encoder.items()}
with open(__a, encoding="utf-8") as merges_handle:
_lowerCAmelCase : Union[str, Any] = merges_handle.read().split("\n")[:-1]
_lowerCAmelCase : Any = [tuple(merge.split()[:-1]) for merge in merges]
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : Optional[Any] = {}
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
_lowerCAmelCase : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case__ ( self, __a, __a = None, __a = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a, token_ids_a=__a, already_has_special_tokens=__a)
if token_ids_a is None:
return [1] + ([0] * len(__a)) + [1]
return [1] + ([0] * len(__a)) + [1, 1] + ([0] * len(__a)) + [1]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def snake_case__ ( self):
'''simple docstring'''
return len(self.encoder)
def snake_case__ ( self):
'''simple docstring'''
return dict(self.encoder, **self.added_tokens_encoder)
def snake_case__ ( self, __a):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : List[Any] = tuple(__a)
_lowerCAmelCase : Optional[Any] = tuple(list(word[:-1]) + [word[-1] + "</w>"])
_lowerCAmelCase : List[str] = get_pairs(__a)
if not pairs:
return token
while True:
_lowerCAmelCase : Optional[Any] = min(__a, key=lambda __a: self.bpe_ranks.get(__a, float("inf")))
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase , _lowerCAmelCase : Tuple = bigram
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : List[Any] = 0
while i < len(__a):
try:
_lowerCAmelCase : Tuple = word.index(__a, __a)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_lowerCAmelCase : List[str] = j
if word[i] == first and i < len(__a) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_lowerCAmelCase : int = tuple(__a)
_lowerCAmelCase : Dict = new_word
if len(__a) == 1:
break
else:
_lowerCAmelCase : Optional[int] = get_pairs(__a)
_lowerCAmelCase : str = "@@ ".join(__a)
_lowerCAmelCase : Union[str, Any] = word[:-4]
_lowerCAmelCase : int = word
return word
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[str] = re.findall(R"\S+\n?", __a)
for token in words:
split_tokens.extend(list(self.bpe(__a).split(" ")))
return split_tokens
def snake_case__ ( self, __a):
'''simple docstring'''
return self.encoder.get(__a, self.encoder.get(self.unk_token))
def snake_case__ ( self, __a):
'''simple docstring'''
return self.decoder.get(__a, self.unk_token)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = " ".join(__a).replace("@@ ", "").strip()
return out_string
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if not os.path.isdir(__a):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
_lowerCAmelCase : Dict = os.path.join(
__a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Any = os.path.join(
__a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__a):
copyfile(self.vocab_file, __a)
if os.path.abspath(self.merges_file) != os.path.abspath(__a):
copyfile(self.merges_file, __a)
return out_vocab_file, out_merge_file
def snake_case__ ( self, __a):
'''simple docstring'''
if isinstance(__a, __a):
try:
with open(__a, "r", encoding="utf-8") as fd:
self.add_from_file(__a)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
return
_lowerCAmelCase : Any = f.readlines()
for lineTmp in lines:
_lowerCAmelCase : Any = lineTmp.strip()
_lowerCAmelCase : str = line.rfind(" ")
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
_lowerCAmelCase : List[Any] = line[:idx]
_lowerCAmelCase : List[str] = len(self.encoder)
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
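# A minimal backbone-style sketch (stage names are generated above as "stem",
# "stage1", ... from the length of `depths`):
#   config = FocalNetConfig(out_features=["stage1", "stage2"])
# get_aligned_output_features_output_indices reconciles out_features with
# out_indices so downstream backbones can address stages either way.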
| 658 | 1 |
from __future__ import annotations
class UpperCAmelCase_ :
def __init__( self, __a):
'''simple docstring'''
_lowerCAmelCase : str = TypeError(
"Matrices must be formed from a list of zero or more lists containing at "
"least one and the same number of values, each of which must be of type "
"int or float.")
if len(__a) != 0:
_lowerCAmelCase : List[str] = len(rows[0])
if cols == 0:
raise error
for row in rows:
if len(__a) != cols:
raise error
for value in row:
if not isinstance(__a, (int, float)):
raise error
_lowerCAmelCase : Union[str, Any] = rows
else:
_lowerCAmelCase : Optional[int] = []
def snake_case__ ( self):
'''simple docstring'''
return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
@property
def snake_case__ ( self):
'''simple docstring'''
return len(self.rows)
@property
def snake_case__ ( self):
'''simple docstring'''
return len(self.rows[0])
@property
def snake_case__ ( self):
'''simple docstring'''
return (self.num_rows, self.num_columns)
@property
def snake_case__ ( self):
'''simple docstring'''
return self.order[0] == self.order[1]
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
for row_num in range(self.num_rows)
]
return Matrix(__a)
def snake_case__ ( self):
'''simple docstring'''
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0])
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]))
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns))
def snake_case__ ( self):
'''simple docstring'''
return bool(self.determinant())
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns)
if other_column != column
]
for other_row in range(self.num_rows)
if other_row != row
]
return Matrix(__a).determinant()
def snake_case__ ( self, __a, __a):
'''simple docstring'''
if (row + column) % 2 == 0:
return self.get_minor(__a, __a)
return -1 * self.get_minor(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
return Matrix(
[
[self.get_minor(__a, __a) for column in range(self.num_columns)]
for row in range(self.num_rows)
])
def snake_case__ ( self):
'''simple docstring'''
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns)
]
for row in range(self.minors().num_rows)
])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [
[self.cofactors().rows[column][row] for column in range(self.num_columns)]
for row in range(self.num_rows)
]
return Matrix(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.determinant()
if not determinant:
raise TypeError("Only matrices with a non-zero determinant have an inverse")
return self.adjugate() * (1 / determinant)
def __repr__( self):
'''simple docstring'''
return str(self.rows)
def __str__( self):
'''simple docstring'''
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0])) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(__a) for value in row]) + ".]"
for row in self.rows
])
+ "]"
)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = TypeError("Row must be a list containing all ints and/or floats")
if not isinstance(__a, __a):
raise type_error
for value in row:
if not isinstance(__a, (int, float)):
raise type_error
if len(__a) != self.num_columns:
raise ValueError(
"Row must be equal in length to the other rows in the matrix")
if position is None:
self.rows.append(__a)
else:
_lowerCAmelCase : Tuple = self.rows[0:position] + [row] + self.rows[position:]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Tuple = TypeError(
"Column must be a list containing all ints and/or floats")
if not isinstance(__a, __a):
raise type_error
for value in column:
if not isinstance(__a, (int, float)):
raise type_error
if len(__a) != self.num_rows:
raise ValueError(
"Column must be equal in length to the other columns in the matrix")
if position is None:
_lowerCAmelCase : str = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
else:
_lowerCAmelCase : str = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows)
]
def __eq__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
return NotImplemented
return self.rows == other.rows
def __ne__( self, __a):
'''simple docstring'''
return not self == other
def __neg__( self):
'''simple docstring'''
return self * -1
def __add__( self, __a):
'''simple docstring'''
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order")
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __sub__( self, __a):
'''simple docstring'''
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order")
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
for i in range(self.num_rows)
])
def __mul__( self, __a):
'''simple docstring'''
if isinstance(__a, (int, float)):
return Matrix(
[[int(element * other) for element in row] for row in self.rows])
elif isinstance(__a, __a):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second")
return Matrix(
[
[Matrix.dot_product(__a, __a) for column in other.columns()]
for row in self.rows
])
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix")
def __pow__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
raise TypeError("A Matrix can only be raised to the power of an int")
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power")
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power")
_lowerCAmelCase : Any = self
for _ in range(other - 1):
result *= self
return result
@classmethod
def snake_case__ ( cls, __a, __a):
'''simple docstring'''
return sum(row[i] * column[i] for i in range(len(__a)))
if __name__ == "__main__":
import doctest
doctest.testmod()
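# Worked 2x2 example of the determinant/inverse identities implemented above:
# for M = [[1, 2], [3, 4]], det(M) = 1*4 - 2*3 = -2, so M is invertible and the
# true inverse is adjugate(M) * (1 / -2) = [[-2.0, 1.0], [1.5, -0.5]].
# Note that scalar multiplication above truncates entries with int(), so the
# inverse computed by this class returns integer-rounded values.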
| 658 |
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def count_of_possible_combinations(_lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_lowerCamelCase , _lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase : Optional[int] = sum(
count_of_possible_combinations_with_dp_array(target - item , _lowerCamelCase )
for item in array )
_lowerCAmelCase : Any = answer
return answer
_lowerCAmelCase : List[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [0] * (target + 1)
_lowerCAmelCase : List[str] = 1
for i in range(1 , target + 1 ):
for j in range(_lowerCamelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
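# Worked count for the demo values above: with array=[1, 2, 5] and target=5,
# ordered compositions are counted, so f(5) = f(4) + f(3) + f(0) with
#   f(0)=1, f(1)=1, f(2)=2, f(3)=3, f(4)=5  ->  f(5) = 5 + 3 + 1 = 9.
# All three implementations (plain recursion, memoized, bottom-up) agree on 9.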
| 658 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'luke'
def __init__( self, __a=5_0267, __a=50_0000, __a=768, __a=256, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=None, __a=1, __a=0, __a=2, **__a, ):
'''simple docstring'''
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : List[Any] = entity_vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[Any] = entity_emb_size
_lowerCAmelCase : List[str] = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : str = use_entity_aware_attention
_lowerCAmelCase : Dict = classifier_dropout
| 658 |
import string
def A ( _lowerCamelCase ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
_lowerCAmelCase : str = ""
for symbol in message:
if symbol in string.ascii_uppercase:
_lowerCAmelCase : List[str] = string.ascii_uppercase.find(_lowerCamelCase )
_lowerCAmelCase : Dict = num - key
if num < 0:
_lowerCAmelCase : Dict = num + len(string.ascii_uppercase )
_lowerCAmelCase : Optional[Any] = translated + string.ascii_uppercase[num]
else:
_lowerCAmelCase : int = translated + symbol
print(F"Decryption using Key #{key}: {translated}" )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = input("Encrypted message: " )
_lowerCAmelCase : Dict = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
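# A non-interactive sketch: brute-forcing the ciphertext "WKH TXLFN EURZQ IRA"
# ("THE QUICK BROWN FOX" shifted by 3) prints the plaintext on the Key #3 line,
# since decryption subtracts the key from each letter index modulo 26.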
| 658 | 1 |
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
_lowerCAmelCase : str = n - k
# Calculate C(n,k)
for i in range(_lowerCamelCase ):
result *= n - i
result //= i + 1
return result
def A ( _lowerCamelCase ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , _lowerCamelCase ) // (node_count + 1)
def A ( _lowerCamelCase ):
'''simple docstring'''
if n < 0:
raise ValueError("factorial() not defined for negative values" )
_lowerCAmelCase : Any = 1
for i in range(1 , n + 1 ):
result *= i
return result
def A ( _lowerCamelCase ):
'''simple docstring'''
return catalan_number(_lowerCamelCase ) * factorial(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
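# Worked numbers for node_count = 5: catalan_number(5) = C(10, 5) // 6 = 42
# binary search trees, and binary_tree_count(5) = 42 * 5! = 5040 binary trees
# (the shape count times the number of node labelings).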
| 658 |
import requests
from bsa import BeautifulSoup
def A ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
_lowerCAmelCase : str = soup.findAll("h1" )
_lowerCAmelCase : Optional[int] = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 1 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'align_text_model'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=0, __a="absolute", __a=True, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : Dict = type_vocab_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Dict = layer_norm_eps
_lowerCAmelCase : Tuple = position_embedding_type
_lowerCAmelCase : Any = use_cache
_lowerCAmelCase : int = pad_token_id
@classmethod
def snake_case__ ( cls, __a, **__a):
'''simple docstring'''
cls._set_token_in_kwargs(__a)
_lowerCAmelCase , _lowerCAmelCase : Dict = cls.get_config_dict(__a, **__a)
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type") == "align":
_lowerCAmelCase : str = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(__a, **__a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'align_vision_model'
def __init__( self, __a = 3, __a = 600, __a = 2.0, __a = 3.1, __a = 8, __a = [3, 3, 5, 3, 5, 5, 3], __a = [32, 16, 24, 40, 80, 112, 192], __a = [16, 24, 40, 80, 112, 192, 320], __a = [], __a = [1, 2, 2, 2, 1, 2, 1], __a = [1, 2, 2, 3, 3, 4, 1], __a = [1, 6, 6, 6, 6, 6, 6], __a = 0.25, __a = "swish", __a = 2560, __a = "mean", __a = 0.02, __a = 0.001, __a = 0.99, __a = 0.2, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : Optional[Any] = image_size
_lowerCAmelCase : Tuple = width_coefficient
_lowerCAmelCase : str = depth_coefficient
_lowerCAmelCase : Optional[int] = depth_divisor
_lowerCAmelCase : Optional[Any] = kernel_sizes
_lowerCAmelCase : Optional[int] = in_channels
_lowerCAmelCase : Union[str, Any] = out_channels
_lowerCAmelCase : Any = depthwise_padding
_lowerCAmelCase : Union[str, Any] = strides
_lowerCAmelCase : Dict = num_block_repeats
_lowerCAmelCase : str = expand_ratios
_lowerCAmelCase : str = squeeze_expansion_ratio
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Any = hidden_dim
_lowerCAmelCase : str = pooling_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : List[str] = batch_norm_eps
_lowerCAmelCase : Optional[int] = batch_norm_momentum
_lowerCAmelCase : List[str] = drop_connect_rate
_lowerCAmelCase : List[Any] = sum(__a) * 4
@classmethod
def snake_case__ ( cls, __a, **__a):
'''simple docstring'''
cls._set_token_in_kwargs(__a)
_lowerCAmelCase , _lowerCAmelCase : List[Any] = cls.get_config_dict(__a, **__a)
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type") == "align":
_lowerCAmelCase : Any = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(__a, **__a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'align'
lowerCamelCase__ = True
def __init__( self, __a=None, __a=None, __a=640, __a=1.0, __a=0.02, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if text_config is None:
_lowerCAmelCase : List[Any] = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
if vision_config is None:
_lowerCAmelCase : Optional[Any] = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
_lowerCAmelCase : int = AlignTextConfig(**__a)
_lowerCAmelCase : int = AlignVisionConfig(**__a)
_lowerCAmelCase : List[Any] = projection_dim
_lowerCAmelCase : Optional[Any] = temperature_init_value
_lowerCAmelCase : int = initializer_range
@classmethod
def snake_case__ ( cls, __a, __a, **__a):
'''simple docstring'''
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = copy.deepcopy(self.__dict__)
_lowerCAmelCase : Union[str, Any] = self.text_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.vision_config.to_dict()
_lowerCAmelCase : str = self.__class__.model_type
return output
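# A minimal composition sketch using the classmethod defined above (it appears
# as snake_case__ in this snippet; upstream it is from_text_vision_configs):
#   text_cfg = AlignTextConfig()
#   vision_cfg = AlignVisionConfig()
#   config = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg)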
| 658 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
def __init__( self, __a, __a):
'''simple docstring'''
if len(__a) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1.")
_lowerCAmelCase : list[float] = list(__a)
_lowerCAmelCase : Any = degree
def __add__( self, __a):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_lowerCAmelCase : Dict = self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree, __a)
else:
_lowerCAmelCase : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree, __a)
def __sub__( self, __a):
'''simple docstring'''
return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self):
'''simple docstring'''
return Polynomial(self.degree, [-c for c in self.coefficients])
def __mul__( self, __a):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(__a)
return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * self.degree
for i in range(self.degree):
_lowerCAmelCase : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, __a)
def snake_case__ ( self, __a = 0):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + 2)
_lowerCAmelCase : Optional[Any] = constant
for i in range(self.degree + 1):
_lowerCAmelCase : Dict = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, __a)
def __eq__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self, __a):
'''simple docstring'''
return not self.__eq__(__a)
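# Worked example for p(x) = 1 + 3x + 2x^2, i.e. Polynomial(2, [1, 3, 2])
# (coefficients are stored lowest degree first):
#   evaluation at x = 2: 1 + 3*2 + 2*4 = 15
#   derivative: 3 + 4x, i.e. Polynomial(1, [3, 4])
#   integral with constant 0: x + 1.5x^2 + (2/3)x^3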
| 658 | 1 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = 42
lowerCamelCase__ = None
# Automatically constructed
lowerCamelCase__ = "dict"
lowerCamelCase__ = None
lowerCamelCase__ = field(default='Translation' , init=a , repr=a)
def __call__( self):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def snake_case__ ( self):
'''simple docstring'''
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        '''simple docstring'''
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        '''simple docstring'''
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Normalize one example into sorted, parallel language/translation lists."""
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) "
                f"are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten the feature into Sequence(Value) columns."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
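    # Hedged usage sketch of ``encode_example``: multi-reference entries are
    # split into one (lang, text) tuple per string, then sorted by language
    # code (and by text within a language).
    # TranslationVariableLanguages(languages=["de", "en", "fr"]).encode_example(
    #     {"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}
    # ) -> {"language": ("de", "en", "fr", "fr"),
    #       "translation": ("die katze", "the cat", "la chatte", "le chat")}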
| 658 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        '''simple docstring'''
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        '''simple docstring'''
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 658 | 1 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """A vertex may take ``color`` only if no already-colored neighbour uses it."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Try to color vertex ``index`` and, recursively, every vertex after it."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring using at most ``max_colors`` colors, else []."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
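# A small self-check, assuming the adjacency-matrix convention above
# (graph[u][v] == 1 means an edge): vertices 0, 1 and 2 form a triangle,
# so two colors fail while three suffice.
if __name__ == "__main__":
    graph = [
        [0, 1, 1, 1],
        [1, 0, 1, 0],
        [1, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(graph, 3))  # [0, 1, 2, 1]
    print(color(graph, 2))  # []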
| 658 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return ``price`` increased by ``tax_rate`` (e.g. 0.25 for 25%)."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")  # 125.0
    print(f"{price_plus_tax(125.50, 0.05) = }")  # ~131.775
| 658 | 1 |