Dataset columns:

| column | dtype | values |
|---|---|---|
| code | string | lengths 81 to 54k |
| code_codestyle | int64 | 0 to 721 |
| style_context | string | lengths 91 to 41.9k |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """
    Cleans a section of the table of contents by removing duplicate entries and sorting them alphabetically by title.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
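# A minimal illustration (added, not part of the original script) of what
# `clean_doc_toc` does: entries are sorted by title and "Overview" is pinned
# first. The toy entries below are made up for the example.
#
#   >>> clean_doc_toc(
#   ...     [
#   ...         {"local": "overview", "title": "Overview"},
#   ...         {"local": "b_doc", "title": "Beta"},
#   ...         {"local": "a_doc", "title": "Alpha"},
#   ...     ]
#   ... )
#   [{'local': 'overview', 'title': 'Overview'}, {'local': 'a_doc', 'title': 'Alpha'}, {'local': 'b_doc', 'title': 'Beta'}]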
import argparse
import json
import os
import re
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
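# Hedged usage sketch (added, not part of the original file). The repo id
# "CompVis/ldm-celebahq-256" is an assumption; substitute any checkpoint that
# packages a compatible VQModel, UNet2DModel, and DDIMScheduler.
#
#   from diffusers import LDMPipeline
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]
#   image.save("ldm_sample.png")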
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """Build a ksize x ksize Gabor filter kernel."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
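# Quick sanity check (added for illustration): the kernel size is forced to be
# odd, so asking for ksize=10 yields an 11x11 kernel.
#
#   >>> gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape
#   (11, 11)
#   >>> gabor_filter_kernel(3, 8, 0, 10, 0, 0).shape
#   (3, 3)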
from __future__ import annotations

import math
import random
from typing import Any


class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    """Rotate the subtree rooted at `node` to the right (pivot on its left child)."""
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    """Mirror image of right_rotation (pivot on the right child)."""
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
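# Worked example (added for illustration): inserting 1, 2, 3 in order makes the
# tree right-heavy at the root, which triggers a single left rotation and gives
# a balanced tree with 2 at the root and 1, 3 as children.
#
#   t = AVLtree()
#   for value in (1, 2, 3):
#       t.insert(value)
#   assert t.root is not None and t.root.get_data() == 2
#   assert t.get_height() == 2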
def binary_insertion_sort(collection: list) -> list:
    """Sort a mutable collection in place using binary insertion sort."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        # binary search for the insertion point in the sorted prefix
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift everything after the insertion point one slot to the right
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
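# Illustrative checks (added, not in the original file):
#
#   >>> binary_insertion_sort([5, 2, 9, 1])
#   [1, 2, 5, 9]
#   >>> binary_insertion_sort([])
#   []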
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
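# Minimal usage sketch (added, not in the original file): with the default
# `depths` of length 4, the config exposes a "stem" stage plus four stages.
#
#   config = FocalNetConfig()
#   assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]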
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations from `array` that sum to `target` (plain recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoized with a DP array (top-down)."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, bottom-up DP."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
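# Worked example (added for illustration): with array = [1, 2, 5] and
# target = 5 there are 9 ordered combinations (1+1+1+1+1, 1+1+1+2, ..., 5),
# so all three implementations return 9.
#
#   >>> combination_sum_iv(3, [1, 2, 5], 5)
#   9
#   >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
#   9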
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on

        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no pretrained list to check
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin because
    # this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption for every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
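# Example run (added for illustration): "KHOOR" is "HELLO" Caesar-shifted by 3,
# so among the 26 printed candidates the key-3 line recovers the plaintext:
#
#   decrypt("KHOOR")
#   # Decryption using Key #0: KHOOR
#   # ...
#   # Decryption using Key #3: HELLO
#   # ...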
import math


def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) by trial division over odd candidates."""
    # precondition
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
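# Illustrative checks (added, not in the original file):
#
#   >>> is_prime(29)
#   True
#   >>> is_prime(28)
#   False
#   >>> next_prime(14)   # 14, 15, 16 are composite, so the search stops at 17
#   17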
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of current worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of words from `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
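# Worked example (added for illustration): "applepenapple" splits into
# "apple" + "pen" + "apple", while "catsandog" has no full segmentation.
#
#   >>> word_break("applepenapple", ["apple", "pen"])
#   True
#   >>> word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])
#   False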
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        # add coefficient-wise, copying the longer polynomial's terms first
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])
    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate(self, substitution: float) -> float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__(self) -> str:
        return self.__str__()
    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
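# Hedged usage sketch (added for illustration; not part of the original module).
# Coefficients are ordered from the constant term upward.
if __name__ == "__main__":
    p = Polynomial(2, [0, 0, 1])   # x^2
    q = Polynomial(1, [1, 2])      # 2x + 1
    print(p + q)                   # 1x^2 + 2x + 1
    print(p.derivative())          # 2x
    print(p.evaluate(3))           # 9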
| 658 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None
    def __str__(self) -> str:
        return f"{self.data}"
class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None
    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__(self) -> str:
        return "->".join([str(item) for item in self])
    def __len__(self) -> int:
        return len(tuple(iter(self)))
    def is_empty(self) -> bool:
        return self.top is None
    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data
    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
    from doctest import testmod
    testmod()
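    # Hedged usage sketch (illustrative addition; `LinkedStack` is the name
    # chosen in the restoration above):
    stack: LinkedStack[int] = LinkedStack()
    stack.push(1)
    stack.push(2)
    stack.push(3)
    assert str(stack) == "3->2->1"
    assert stack.pop() == 3
    assert stack.peek() == 2
    assert len(stack) == 2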
| 714 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=3_2000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1E-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.", FutureWarning, )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
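# Hedged usage sketch (illustrative addition; shown as comments since the module
# uses package-relative imports):
#   config = XLNetConfig(d_model=64, n_head=4)
#   config.hidden_size              # -> 64, via the attribute_map alias for d_model
#   config.max_position_embeddings  # -> -1, XLNet has no fixed sequence length limit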
| 658 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class UpperCAmelCase_ :
def __init__( self, __a, ):
'''simple docstring'''
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : Union[str, Any] = 13
_lowerCAmelCase : int = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : int = self.seq_length + self.mem_len
_lowerCAmelCase : List[Any] = 15
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : Optional[int] = 99
_lowerCAmelCase : int = [10, 50, 80]
_lowerCAmelCase : int = 32
_lowerCAmelCase : List[Any] = 32
_lowerCAmelCase : Union[str, Any] = 4
_lowerCAmelCase : str = 8
_lowerCAmelCase : Dict = 128
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : Any = 2
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Tuple = 3
_lowerCAmelCase : Any = self.vocab_size - 1
_lowerCAmelCase : str = 0.01
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : List[Any] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : Tuple = TransfoXLConfig(
vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )
return (config, input_ids_a, input_ids_a, lm_labels)
def snake_case__ ( self):
'''simple docstring'''
random.seed(self.seed)
tf.random.set_seed(self.seed)
def snake_case__ ( self, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFTransfoXLModel(__a)
_lowerCAmelCase : Any = model(__a).to_tuple()
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids_a, "mems": mems_a}
_lowerCAmelCase : Union[str, Any] = model(__a).to_tuple()
self.parent.assertEqual(hidden_states_a.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(hidden_states_a.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
self.parent.assertListEqual(
[mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
def snake_case__ ( self, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = TFTransfoXLLMHeadModel(__a)
_lowerCAmelCase : Tuple = model(__a).to_tuple()
_lowerCAmelCase : str = {"input_ids": input_ids_a, "labels": lm_labels}
_lowerCAmelCase : Tuple = model(__a).to_tuple()
_lowerCAmelCase : int = model([input_ids_a, mems_a]).to_tuple()
_lowerCAmelCase : Union[str, Any] = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
_lowerCAmelCase : str = model(__a).to_tuple()
self.parent.assertEqual(lm_logits_a.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
self.parent.assertEqual(lm_logits_a.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in mems_a], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
def snake_case__ ( self, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFTransfoXLForSequenceClassification(__a)
_lowerCAmelCase : str = model(__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.prepare_config_and_inputs()
(_lowerCAmelCase) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Tuple = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
lowerCamelCase__ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCamelCase__ = () if is_tf_available() else ()
lowerCamelCase__ = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a, __a, __a):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = TFTransfoXLModelTester(self)
_lowerCAmelCase : Optional[Any] = ConfigTester(self, config_class=__a, d_embed=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
self.model_tester.set_seed()
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
self.model_tester.set_seed()
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(__a)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : List[str] = model.get_output_embeddings()
assert isinstance(__a, tf.keras.layers.Layer)
_lowerCAmelCase : List[str] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : int = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def snake_case__ ( self):
'''simple docstring'''
pass
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(__a)
self.assertIsNotNone(__a)
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
def snake_case__ ( self):
'''simple docstring'''
pass
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@unittest.skip("Skip test until #12651 is resolved.")
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
_lowerCAmelCase : int = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.intaa) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : str = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Dict = model.generate(__a, max_length=200, do_sample=__a)
self.assertListEqual(output_ids[0].numpy().tolist(), __a)
| 715 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 0 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
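# Worked step (added for clarity): a triangle number satisfies t_b = b * (b + 1) / 2.
# For the product t_a * t_b to land near `target` we need t_b ~= target / t_a, i.e.
# b**2 + b - 2 * target / t_a ~= 0; the positive root from the quadratic formula,
# b ~= (-1 + sqrt(1 + 8 * target / t_a)) / 2, is exactly the `b_estimate` above.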
if __name__ == "__main__":
print(f'''{solution() = }''')
| 716 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 658 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( __lowerCAmelCase):
lowerCamelCase__ = '''camembert'''
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=1, __a=0, __a=2, __a="absolute", __a=True, __a=None, **__a, ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase_, bos_token_id=lowerCAmelCase_, eos_token_id=lowerCAmelCase_, **lowerCAmelCase_)
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : str = max_position_embeddings
_lowerCAmelCase : Dict = type_vocab_size
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : List[str] = position_embedding_type
_lowerCAmelCase : List[str] = use_cache
_lowerCAmelCase : str = classifier_dropout
class UpperCAmelCase_ ( __lowerCAmelCase):
@property
def snake_case__ ( self):
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCAmelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_lowerCAmelCase : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 717 |
import base64
def base85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode("utf-8"))
def base85_decode(a85encoded_string: bytes) -> str:
    return base64.a85decode(a85encoded_string).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
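    # Hedged round-trip check (illustrative addition, not in the original file):
    sample = "The quick brown fox"
    assert base85_decode(base85_encode(sample)) == sample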
| 658 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    """Randomly sample chunks of `max_length` seconds from the input audio"""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
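# Hedged worked example (added): a 20 s clip at 16 kHz holds 320_000 samples, so
# random_subsample(wav, max_length=5) returns a random contiguous 80_000-sample window.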
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(default=a , metadata={'help': 'Name of a dataset from the datasets package'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'A file containing the training audio paths and labels.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'A file containing the validation audio paths and labels.'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
lowerCamelCase__ = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
lowerCamelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Name or path of preprocessor config.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def snake_case__ ( self):
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", UpperCAmelCase__, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`.")
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _lowerCamelCase , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCAmelCase : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(_lowerCamelCase )
transformers.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_lowerCAmelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase : List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
_lowerCAmelCase : Any = DatasetDict()
_lowerCAmelCase : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--label_column_name` to the correct text column - one of "
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_lowerCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_lowerCAmelCase : int = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_lowerCAmelCase : int = feature_extractor.model_input_names[0]
def train_transforms(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = []
for audio in batch[data_args.audio_column_name]:
_lowerCAmelCase : Any = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_lowerCamelCase )
_lowerCAmelCase : Dict = feature_extractor(_lowerCamelCase , sampling_rate=feature_extractor.sampling_rate )
_lowerCAmelCase : Optional[Any] = {model_input_name: inputs.get(_lowerCamelCase )}
_lowerCAmelCase : List[Any] = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_lowerCamelCase ):
_lowerCAmelCase : str = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
_lowerCAmelCase : Union[str, Any] = feature_extractor(_lowerCamelCase , sampling_rate=feature_extractor.sampling_rate )
_lowerCAmelCase : str = {model_input_name: inputs.get(_lowerCamelCase )}
_lowerCAmelCase : Dict = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_lowerCAmelCase : int = raw_datasets['''train'''].features[data_args.label_column_name].names
_lowerCAmelCase : int = {}, {}
for i, label in enumerate(_lowerCamelCase ):
_lowerCAmelCase : str = str(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = label
# Load the accuracy metric from the datasets package
_lowerCAmelCase : Union[str, Any] = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_lowerCamelCase ):
_lowerCAmelCase : Any = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_lowerCamelCase , references=eval_pred.label_ids )
_lowerCAmelCase : Tuple = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel=_lowerCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : int = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_lowerCAmelCase : Dict = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_lowerCamelCase , output_all_columns=_lowerCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_lowerCAmelCase : Dict = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_lowerCamelCase , output_all_columns=_lowerCamelCase )
# Initialize our trainer
_lowerCAmelCase : str = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , )
# Training
if training_args.do_train:
_lowerCAmelCase : List[Any] = None
if training_args.resume_from_checkpoint is not None:
_lowerCAmelCase : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCAmelCase : Optional[int] = last_checkpoint
_lowerCAmelCase : List[str] = trainer.train(resume_from_checkpoint=_lowerCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_lowerCAmelCase : Dict = trainer.evaluate()
trainer.log_metrics("eval" , _lowerCamelCase )
trainer.save_metrics("eval" , _lowerCamelCase )
# Write model card and (optionally) push to hub
_lowerCAmelCase : Tuple = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCamelCase )
else:
trainer.create_model_card(**_lowerCamelCase )
if __name__ == "__main__":
main()
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
| 658 | 0 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_snake_case = logging.get_logger(__name__)
def A ( _lowerCamelCase ):
'''simple docstring'''
if isinstance(_lowerCamelCase , np.ndarray ):
return list(tensor.shape )
_lowerCAmelCase : Optional[int] = tf.shape(_lowerCamelCase )
if tensor.shape == tf.TensorShape(_lowerCamelCase ):
return dynamic
_lowerCAmelCase : List[Any] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(_lowerCamelCase )]
def A ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ):
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1e-9 , axis=_lowerCamelCase , name=_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1e-5 , _lowerCamelCase=-1 ):
'''simple docstring'''
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
# Get mean and variance on the axis to be normalized
_lowerCAmelCase : str = tf.nn.moments(_lowerCamelCase , axes=[axis] , keepdims=_lowerCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
_lowerCAmelCase : Dict = [1] * inputs.shape.rank
_lowerCAmelCase : List[Any] = shape_list(_lowerCamelCase )[axis]
_lowerCAmelCase : Optional[Any] = tf.reshape(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = tf.reshape(_lowerCamelCase , _lowerCamelCase )
# Compute layer normalization using the batch_normalization
# function.
_lowerCAmelCase : Union[str, Any] = tf.nn.batch_normalization(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , offset=_lowerCamelCase , scale=_lowerCamelCase , variance_epsilon=_lowerCamelCase , )
return outputs
def A ( _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=-1 ):
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
_lowerCAmelCase : Dict = tf.shape(_lowerCamelCase )
_lowerCAmelCase : List[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
_lowerCAmelCase : Optional[int] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , tf.Tensor ):
_lowerCAmelCase : Tuple = tf.convert_to_tensor(_lowerCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
_lowerCAmelCase : Optional[int] = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
_lowerCAmelCase : int = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "input_ids" ):
'''simple docstring'''
tf.debugging.assert_less(
_lowerCamelCase , tf.cast(_lowerCamelCase , dtype=tensor.dtype ) , message=(
F"The maximum value of {tensor_name} ({tf.math.reduce_max(_lowerCamelCase )}) must be smaller than the embedding "
F"layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
) , )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 64_512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
_lowerCAmelCase : Optional[int] = [x for x in data if len(_lowerCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"The following attributes cannot be saved to HDF5 file because "
F"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
F"bytes: {bad_attributes}" )
_lowerCAmelCase : Dict = np.asarray(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : str = np.array_split(_lowerCamelCase , _lowerCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
_lowerCAmelCase : Union[str, Any] = np.array_split(_lowerCamelCase , _lowerCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = chunk_data
else:
_lowerCAmelCase : List[str] = data
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if name in group.attrs:
_lowerCAmelCase : Tuple = [n.decode("utf8" ) if hasattr(_lowerCamelCase , "decode" ) else n for n in group.attrs[name]]
else:
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Dict = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("utf8" ) if hasattr(_lowerCamelCase , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
chunk_id += 1
return data
def A ( _lowerCamelCase ):
'''simple docstring'''
def _expand_single_ad_tensor(_lowerCamelCase ):
if isinstance(_lowerCamelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(_lowerCamelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , _lowerCamelCase )
| 719 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise the PyTorch model from the TensorFlow-era JSON config
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 658 | 0 |
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
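# Hedged check (added): the row update above applies Pascal's rule
# C(n, r) = C(n - 1, r - 1) + C(n - 1, r) in O(n * r) time and O(r) space.
import math
assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252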
| 720 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 658 | 0 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    # two-ended linear search: compare both ends, then shrink the window
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
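    # Hedged usage sketch (illustrative addition, not in the original file):
    data = [1, 3, 5, 7, 9]
    assert search(data, 7) == 3    # matched while scanning from the right end
    assert search(data, 4) == -1   # absent key exhausts the window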
| 721 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
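# Hedged usage sketch (illustrative addition; shown as comments because it needs
# the external `taming` checkpoint files):
#   cfg = {"target": "collections.OrderedDict", "params": {}}
#   obj = instantiate_from_config(cfg)   # resolves the dotted path, then instantiates
#   model = load_vqgan("cpu")            # loads ./model_checkpoints/vqgan_only.{yaml,pt}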
| 658 | 0 |
_snake_case = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
_snake_case = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
_snake_case = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_snake_case = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
_snake_case = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
_snake_case = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
_snake_case = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
_snake_case = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
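# The constants above read as precomputed diffusion timestep schedules of
# varying density, used as fixtures elsewhere. Below is a sketch of one
# common way such a strided schedule is derived; this is an assumption, as
# the exact recipe behind these particular lists is not shown in this file.
import numpy as np

def strided_timesteps(num_train_timesteps=1000, num_inference_steps=27):
    # Evenly spaced timesteps from num_train_timesteps - 1 down to 0.
    steps = np.linspace(0, num_train_timesteps - 1, num_inference_steps)
    return sorted({int(round(s)) for s in steps}, reverse=True)

print(strided_timesteps())  # 27 descending timesteps ending at 0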
| 700 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( PretrainedConfig):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
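# Usage sketch for the config class above (`UpperCAmelCase_` is this dump's
# rendering of `RoCBertConfig`); save_pretrained/from_pretrained are the
# standard PretrainedConfig round trip.
config = UpperCAmelCase_()                          # all defaults from __init__
config.save_pretrained("./roc_bert_config")         # writes config.json
restored = UpperCAmelCase_.from_pretrained("./roc_bert_config")
assert restored.vocab_size == config.vocab_size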
| 658 | 0 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class UpperCAmelCase_ :
def __init__( self, __a, __a=3, __a=7, __a=True, __a=True, __a=False, __a=True, __a=99, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=4, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Union[str, Any] = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : List[Any] = use_input_mask
_lowerCAmelCase : Tuple = use_token_type_ids
_lowerCAmelCase : Optional[Any] = use_labels
_lowerCAmelCase : Union[str, Any] = vocab_size
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : str = type_vocab_size
_lowerCAmelCase : Optional[int] = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Tuple = num_labels
_lowerCAmelCase : Union[str, Any] = num_choices
_lowerCAmelCase : str = scope
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length])
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Tuple = None
if self.use_labels:
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.num_choices)
_lowerCAmelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self):
'''simple docstring'''
return FalconConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=_lowerCamelCase, )
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = FalconModel(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
_lowerCAmelCase : Any = model(_lowerCamelCase, attention_mask=_lowerCamelCase)
_lowerCAmelCase : Optional[Any] = model(_lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Tuple = FalconModel(_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
_lowerCAmelCase : Dict = model(
_lowerCamelCase, attention_mask=_lowerCamelCase, encoder_hidden_states=_lowerCamelCase, encoder_attention_mask=_lowerCamelCase, )
_lowerCAmelCase : List[str] = model(
_lowerCamelCase, attention_mask=_lowerCamelCase, encoder_hidden_states=_lowerCamelCase, )
_lowerCAmelCase : Optional[int] = model(_lowerCamelCase, attention_mask=_lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = FalconForCausalLM(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
_lowerCAmelCase : List[str] = model(_lowerCamelCase, attention_mask=_lowerCamelCase, labels=_lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Dict = True
_lowerCAmelCase : int = True
_lowerCAmelCase : Any = FalconForCausalLM(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
# first forward pass
_lowerCAmelCase : Any = model(
_lowerCamelCase, attention_mask=_lowerCamelCase, encoder_hidden_states=_lowerCamelCase, encoder_attention_mask=_lowerCamelCase, use_cache=_lowerCamelCase, )
_lowerCAmelCase : Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3), config.vocab_size)
_lowerCAmelCase : List[str] = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
_lowerCAmelCase : str = torch.cat([input_ids, next_tokens], dim=-1)
_lowerCAmelCase : Tuple = torch.cat([input_mask, next_mask], dim=-1)
_lowerCAmelCase : Union[str, Any] = model(
_lowerCamelCase, attention_mask=_lowerCamelCase, encoder_hidden_states=_lowerCamelCase, encoder_attention_mask=_lowerCamelCase, output_hidden_states=_lowerCamelCase, )["hidden_states"][0]
_lowerCAmelCase : Any = model(
_lowerCamelCase, attention_mask=_lowerCamelCase, encoder_hidden_states=_lowerCamelCase, encoder_attention_mask=_lowerCamelCase, past_key_values=_lowerCamelCase, output_hidden_states=_lowerCamelCase, )["hidden_states"][0]
# select random slice
_lowerCAmelCase : Optional[int] = ids_tensor((1,), output_from_past.shape[-1]).item()
_lowerCAmelCase : str = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase, _lowerCamelCase, atol=1E-3))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
lowerCamelCase__ = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (FalconForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = FalconModelTester(self)
_lowerCAmelCase : Any = ConfigTester(self, config_class=_lowerCamelCase, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , *_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_lowerCAmelCase : List[str] = alibi
self.model_tester.create_and_check_model(_lowerCamelCase, *_lowerCamelCase)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[int] = 3
_lowerCAmelCase : Optional[int] = input_dict["input_ids"]
_lowerCAmelCase : Dict = input_ids.ne(1).to(_lowerCamelCase)
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
_lowerCAmelCase : List[str] = FalconForSequenceClassification(_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
_lowerCAmelCase : Optional[Any] = model(_lowerCamelCase, attention_mask=_lowerCamelCase, labels=_lowerCamelCase)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[Any] = 3
_lowerCAmelCase : str = "single_label_classification"
_lowerCAmelCase : List[str] = input_dict["input_ids"]
_lowerCAmelCase : Optional[Any] = input_ids.ne(1).to(_lowerCamelCase)
_lowerCAmelCase : Optional[int] = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
_lowerCAmelCase : Tuple = FalconForSequenceClassification(_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
_lowerCAmelCase : Union[str, Any] = model(_lowerCamelCase, attention_mask=_lowerCamelCase, labels=_lowerCamelCase)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : str = input_dict["input_ids"]
_lowerCAmelCase : str = FalconForCausalLM(_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
_lowerCAmelCase : Tuple = model(_lowerCamelCase, use_cache=_lowerCamelCase)
_lowerCAmelCase : Optional[int] = input_ids.shape[0]
_lowerCAmelCase : Dict = model._convert_to_rw_cache(result.past_key_values)
_lowerCAmelCase : List[Any] = model._convert_cache_to_standard_format(_lowerCamelCase, _lowerCamelCase)
for layer in range(len(_lowerCamelCase)):
for tensor_idx in range(2):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = 3
_lowerCAmelCase : str = "multi_label_classification"
_lowerCAmelCase : Tuple = input_dict["input_ids"]
_lowerCAmelCase : int = input_ids.ne(1).to(_lowerCamelCase)
_lowerCAmelCase : Union[str, Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
_lowerCAmelCase : int = FalconForSequenceClassification(_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
_lowerCAmelCase : List[str] = model(_lowerCamelCase, attention_mask=_lowerCamelCase, labels=_lowerCamelCase)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def snake_case__ ( self):
'''simple docstring'''
for model_class in self.all_generative_model_classes:
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_lowerCamelCase, "use_cache"):
return
_lowerCAmelCase : int = model_class(_lowerCamelCase).to(_lowerCamelCase)
if "use_cache" not in inputs:
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Optional[int] = model(**_lowerCamelCase)
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_lowerCAmelCase : List[str] = (
getattr(_lowerCamelCase, "decoder_layers", _lowerCamelCase)
or getattr(_lowerCamelCase, "num_decoder_layers", _lowerCamelCase)
or config.num_hidden_layers
)
_lowerCAmelCase : List[Any] = getattr(_lowerCamelCase, "num_kv_heads", config.num_attention_heads)
_lowerCAmelCase : Optional[int] = getattr(_lowerCamelCase, "d_model", config.hidden_size)
_lowerCAmelCase : List[str] = embed_dim // num_attention_heads
_lowerCAmelCase : str = outputs["past_key_values"]
self.assertEqual(len(_lowerCamelCase), _lowerCamelCase)
_lowerCAmelCase , _lowerCAmelCase : int = inputs["input_ids"].shape
for i in range(_lowerCamelCase):
if config.new_decoder_architecture:
_lowerCAmelCase : Any = config.num_attention_heads
elif config.multi_query:
_lowerCAmelCase : Optional[Any] = 1
self.assertEqual(len(past_kv[0]), 2) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
self.assertEqual(
past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
_lowerCAmelCase : int = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
model.eval()
model.to(_lowerCamelCase)
_lowerCAmelCase : Tuple = tokenizer("My favorite food is", return_tensors="pt").to(_lowerCamelCase)
_lowerCAmelCase : int = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
_lowerCAmelCase : Optional[Any] = model.generate(**_lowerCamelCase, do_sample=_lowerCamelCase, max_new_tokens=19)
_lowerCAmelCase : Tuple = tokenizer.batch_decode(_lowerCamelCase)[0]
self.assertEqual(_lowerCamelCase, _lowerCamelCase)
@slow
def snake_case__ ( self):
'''simple docstring'''
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
_lowerCAmelCase : str = AutoTokenizer.from_pretrained(_lowerCamelCase)
_lowerCAmelCase : List[str] = FalconForCausalLM.from_pretrained(_lowerCamelCase)
model.eval()
model.to(_lowerCamelCase)
_lowerCAmelCase : Union[str, Any] = tokenizer("My favorite food is", return_tensors="pt").to(_lowerCamelCase)
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_lowerCamelCase, do_sample=_lowerCamelCase, max_new_tokens=4)
model.generate(**_lowerCamelCase, do_sample=_lowerCamelCase, max_new_tokens=4)
model.generate(**_lowerCamelCase, num_beams=2, max_new_tokens=4)
@slow
def snake_case__ ( self):
'''simple docstring'''
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(_lowerCamelCase)
_lowerCAmelCase : Optional[int] = FalconForCausalLM.from_pretrained(_lowerCamelCase)
model.eval()
model.to(device=_lowerCamelCase)
_lowerCAmelCase : Tuple = tokenizer("My favorite food is", return_tensors="pt").to(_lowerCamelCase)
# Test results are the same with and without cache
_lowerCAmelCase : str = model.generate(**_lowerCamelCase, do_sample=_lowerCamelCase, max_new_tokens=20, use_cache=_lowerCamelCase)
_lowerCAmelCase : List[str] = model.generate(**_lowerCamelCase, do_sample=_lowerCamelCase, max_new_tokens=20, use_cache=_lowerCamelCase)
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
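# For reference, the per-layer key/value cache shape that the cache-conversion
# assertions above check, spelled out as a small helper. This assumes Falcon's
# standard (batch, heads, seq, head_dim) layout after conversion.
def expected_kv_shape(batch_size, num_heads, seq_length, hidden_size):
    head_dim = hidden_size // num_heads
    return (batch_size, num_heads, seq_length, head_dim)

print(expected_kv_shape(batch_size=2, num_heads=4, seq_length=7, hidden_size=32))
# -> (2, 4, 7, 8)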
| 701 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
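# Example calls for the averaging helper above (named `A` in this dump); it
# returns a float and raises on an empty list.
print(A([3, 6, 9, 12, 15, 18, 21]))    # 12.0
print(A([5, 10, 15, 20, 25, 30, 35]))  # 20.0
try:
    A([])
except ValueError as err:
    print(err)                         # List is empty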
| 658 | 0 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase):
lowerCamelCase__ = AutoencoderKL
lowerCamelCase__ = 'sample'
lowerCamelCase__ = 1E-2
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = 4
_lowerCAmelCase : int = 3
_lowerCAmelCase : List[Any] = (32, 32)
_lowerCAmelCase : Tuple = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase)
return {"sample": image}
@property
def snake_case__ ( self):
'''simple docstring'''
return (3, 32, 32)
@property
def snake_case__ ( self):
'''simple docstring'''
return (3, 32, 32)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
_lowerCAmelCase : str = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.prepare_init_args_and_inputs_for_common()
_lowerCAmelCase : Tuple = self.model_class(**__lowerCamelCase)
model.to(__lowerCamelCase)
assert not model.is_gradient_checkpointing and model.training
_lowerCAmelCase : Tuple = model(**__lowerCamelCase).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_lowerCAmelCase : Optional[Any] = torch.randn_like(__lowerCamelCase)
_lowerCAmelCase : Tuple = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_lowerCAmelCase : str = self.model_class(**__lowerCamelCase)
# clone model
model_a.load_state_dict(model.state_dict())
model_a.to(__lowerCamelCase)
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_lowerCAmelCase : Dict = model_a(**__lowerCamelCase).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_lowerCAmelCase : Any = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5)
_lowerCAmelCase : List[str] = dict(model.named_parameters())
_lowerCAmelCase : Optional[Any] = dict(model_a.named_parameters())
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data, named_params_a[name].grad.data, atol=5E-5))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
self.assertEqual(len(loading_info["missing_keys"]), 0)
model.to(__lowerCamelCase)
_lowerCAmelCase : Optional[int] = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
_lowerCAmelCase : Tuple = model.to(__lowerCamelCase)
model.eval()
if torch_device == "mps":
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0)
else:
_lowerCAmelCase : Any = torch.Generator(device=__lowerCamelCase).manual_seed(0)
_lowerCAmelCase : Optional[int] = torch.randn(
1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
_lowerCAmelCase : Dict = image.to(__lowerCamelCase)
with torch.no_grad():
_lowerCAmelCase : List[Any] = model(__lowerCamelCase, sample_posterior=__lowerCamelCase, generator=__lowerCamelCase).sample
_lowerCAmelCase : Optional[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_lowerCAmelCase : List[str] = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
])
elif torch_device == "cpu":
_lowerCAmelCase : str = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026])
else:
_lowerCAmelCase : Any = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485])
self.assertTrue(torch_all_close(__lowerCamelCase, __lowerCamelCase, rtol=1E-2))
@slow
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return f"gaussian_noise_s={seed}_shape={'_'.join([str(__lowerCamelCase) for s in shape])}.npy"
def snake_case__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self, __a=0, __a=(4, 3, 512, 512), __a=False):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = torch.floataa if fpaa else torch.floataa
_lowerCAmelCase : Dict = torch.from_numpy(load_hf_numpy(self.get_file_format(__lowerCamelCase, __lowerCamelCase))).to(__lowerCamelCase).to(__lowerCamelCase)
return image
def snake_case__ ( self, __a="CompVis/stable-diffusion-v1-4", __a=False):
'''simple docstring'''
_lowerCAmelCase : Dict = '''fp16''' if fpaa else None
_lowerCAmelCase : Optional[Any] = torch.floataa if fpaa else torch.floataa
_lowerCAmelCase : List[Any] = AutoencoderKL.from_pretrained(
__lowerCamelCase, subfolder="vae", torch_dtype=__lowerCamelCase, revision=__lowerCamelCase, )
model.to(__lowerCamelCase).eval()
return model
def snake_case__ ( self, __a=0):
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(__lowerCamelCase)
return torch.Generator(device=__lowerCamelCase).manual_seed(__lowerCamelCase)
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
])
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_sd_vae_model()
_lowerCAmelCase : Any = self.get_sd_image(__lowerCamelCase)
_lowerCAmelCase : List[str] = self.get_generator(__lowerCamelCase)
with torch.no_grad():
_lowerCAmelCase : Dict = model(__lowerCamelCase, generator=__lowerCamelCase, sample_posterior=__lowerCamelCase).sample
assert sample.shape == image.shape
_lowerCAmelCase : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase : Any = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
assert torch_all_close(__lowerCamelCase, __lowerCamelCase, atol=3E-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
])
@require_torch_gpu
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_sd_vae_model(fpaa=__lowerCamelCase)
_lowerCAmelCase : List[str] = self.get_sd_image(__lowerCamelCase, fpaa=__lowerCamelCase)
_lowerCAmelCase : List[Any] = self.get_generator(__lowerCamelCase)
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(__lowerCamelCase, generator=__lowerCamelCase, sample_posterior=__lowerCamelCase).sample
assert sample.shape == image.shape
_lowerCAmelCase : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCAmelCase : Any = torch.tensor(__lowerCamelCase)
assert torch_all_close(__lowerCamelCase, __lowerCamelCase, atol=1E-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
])
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = self.get_sd_vae_model()
_lowerCAmelCase : Dict = self.get_sd_image(__lowerCamelCase)
with torch.no_grad():
_lowerCAmelCase : Tuple = model(__lowerCamelCase).sample
assert sample.shape == image.shape
_lowerCAmelCase : Dict = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase : List[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
assert torch_all_close(__lowerCamelCase, __lowerCamelCase, atol=3E-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
])
@require_torch_gpu
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_sd_vae_model()
_lowerCAmelCase : Union[str, Any] = self.get_sd_image(__lowerCamelCase, shape=(3, 4, 64, 64))
with torch.no_grad():
_lowerCAmelCase : str = model.decode(__lowerCamelCase).sample
assert list(sample.shape) == [3, 3, 512, 512]
_lowerCAmelCase : Dict = sample[-1, -2:, :2, -2:].flatten().cpu()
_lowerCAmelCase : Any = torch.tensor(__lowerCamelCase)
assert torch_all_close(__lowerCamelCase, __lowerCamelCase, atol=1E-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
])
@require_torch_gpu
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_sd_vae_model(fpaa=__lowerCamelCase)
_lowerCAmelCase : Dict = self.get_sd_image(__lowerCamelCase, shape=(3, 4, 64, 64), fpaa=__lowerCamelCase)
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model.decode(__lowerCamelCase).sample
assert list(sample.shape) == [3, 3, 512, 512]
_lowerCAmelCase : Dict = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCAmelCase : Union[str, Any] = torch.tensor(__lowerCamelCase)
assert torch_all_close(__lowerCamelCase, __lowerCamelCase, atol=5E-3)
@parameterized.expand([(13,), (16,), (27,)])
@require_torch_gpu
@unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_sd_vae_model(fpaa=__lowerCamelCase)
_lowerCAmelCase : str = self.get_sd_image(__lowerCamelCase, shape=(3, 4, 64, 64), fpaa=__lowerCamelCase)
with torch.no_grad():
_lowerCAmelCase : Any = model.decode(__lowerCamelCase).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model.decode(__lowerCamelCase).sample
assert list(sample.shape) == [3, 3, 512, 512]
assert torch_all_close(__lowerCamelCase, __lowerCamelCase, atol=1E-1)
@parameterized.expand([(13,), (16,), (37,)])
@require_torch_gpu
@unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_sd_vae_model()
_lowerCAmelCase : Dict = self.get_sd_image(__lowerCamelCase, shape=(3, 4, 64, 64))
with torch.no_grad():
_lowerCAmelCase : Tuple = model.decode(__lowerCamelCase).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model.decode(__lowerCamelCase).sample
assert list(sample.shape) == [3, 3, 512, 512]
assert torch_all_close(__lowerCamelCase, __lowerCamelCase, atol=1E-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
])
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_sd_vae_model()
_lowerCAmelCase : Tuple = self.get_sd_image(__lowerCamelCase)
_lowerCAmelCase : Optional[Any] = self.get_generator(__lowerCamelCase)
with torch.no_grad():
_lowerCAmelCase : List[Any] = model.encode(__lowerCamelCase).latent_dist
_lowerCAmelCase : int = dist.sample(generator=__lowerCamelCase)
assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_lowerCAmelCase : Optional[int] = sample[0, -1, -3:, -3:].flatten().cpu()
_lowerCAmelCase : int = torch.tensor(__lowerCamelCase)
_lowerCAmelCase : Any = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(__lowerCamelCase, __lowerCamelCase, atol=__lowerCamelCase)
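# Usage sketch outside the test harness: an encode/decode round trip with the
# same pretrained KL autoencoder the tests load (weights download from the Hub
# on first use; the random tensor stands in for a preprocessed image batch).
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").eval()
image = torch.randn(1, 3, 512, 512)
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()
    decoded = vae.decode(latents).sample
print(latents.shape, decoded.shape)  # torch.Size([1, 4, 64, 64]) torch.Size([1, 3, 512, 512])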
| 702 |
def A ( _lowerCamelCase ):
'''simple docstring'''
if length <= 0 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
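# The comprehension above implements the closed form h(n) = n * (2n - 1); a
# quick cross-check against the equivalent recurrence h(n) = h(n-1) + 4n - 3.
def hexagonal_check(length):
    closed = [n * (2 * n - 1) for n in range(length)]
    recurrent, value = [], 0
    for n in range(length):
        value = 0 if n == 0 else value + 4 * n - 3
        recurrent.append(value)
    return closed == recurrent

print(hexagonal_check(10))  # True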
| 658 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
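# A small sketch exercising a few of the re-exported helpers (these are real
# accelerate.utils APIs; the duplicated bitsandbytes check higher up is this
# dump's rendering of the original 4-bit/8-bit pair).
import torch
from accelerate.utils import get_max_memory, send_to_device, set_seed

set_seed(42)                          # seeds Python, NumPy and torch RNGs
print(get_max_memory())               # per-device memory budget, e.g. {"cpu": ...}
batch = {"input": torch.ones(2, 3)}
batch = send_to_device(batch, "cpu")  # recursively moves nested tensors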
| 703 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( _lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
# Centralize the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if features.any():
_lowerCAmelCase : List[Any] = features.mean(1 )
# Center the dataset
_lowerCAmelCase : List[Any] = features - np.reshape(_lowerCamelCase , (data_mean.size, 1) )
_lowerCAmelCase : Optional[Any] = np.dot(_lowerCamelCase , centered_data.T ) / features.shape[1]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = np.linalg.eigh(_lowerCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCAmelCase : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCAmelCase : List[Any] = np.dot(filtered_eigenvectors.T , _lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
_lowerCAmelCase , _lowerCAmelCase : List[str] = eigh(
covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
_lowerCAmelCase : List[str] = eigenvectors[:, ::-1][:, :dimensions]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = np.linalg.svd(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = svd_matrix[:, 0:dimensions]
_lowerCAmelCase : str = np.dot(filtered_svd_matrix.T , _lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Union[str, Any] = linear_discriminant_analysis(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Tuple = principal_component_analysis(_lowerCamelCase , _lowerCamelCase )
if not np.allclose(_lowerCamelCase , _lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
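# End-to-end sketch applying the PCA helper above to random data. The
# definitions in this dump are all renamed to `A`, so this assumes the
# original name that the tests above already reference.
import numpy as np

rng = np.random.default_rng(0)
features = rng.normal(size=(5, 40))              # 5 features, 40 samples
projected = principal_component_analysis(features, 2)
print(projected.shape)                           # (2, 40)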
| 658 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_snake_case = 25_0004
_snake_case = 25_0020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase):
lowerCamelCase__ = MBartaaTokenizer
lowerCamelCase__ = MBartaaTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : Dict = MBartaaTokenizer(lowercase_, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=lowercase_)
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = "<s>"
_lowerCAmelCase : Any = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_), lowercase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_), lowercase_)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<s>")
self.assertEqual(vocab_keys[1], "<pad>")
self.assertEqual(vocab_keys[-1], "<mask>")
self.assertEqual(len(lowercase_), 1054)
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1054)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = MBartaaTokenizer(lowercase_, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=lowercase_)
_lowerCAmelCase : List[str] = tokenizer.tokenize("This is a test")
self.assertListEqual(lowercase_, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
_lowerCAmelCase : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowercase_, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
_lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(lowercase_)
self.assertListEqual(
lowercase_, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
_lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(lowercase_)
self.assertListEqual(
lowercase_, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
@slow
def snake_case__ ( self):
'''simple docstring'''
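        # fmt: off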
_lowerCAmelCase : int = {"input_ids": [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2", )
def snake_case__ ( self):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCAmelCase : Union[str, Any] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
_lowerCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(lowercase_, **lowercase_)
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(lowercase_, **lowercase_)
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Optional[int] = tokenizer_r.save_pretrained(lowercase_)
_lowerCAmelCase : Optional[Any] = tokenizer_p.save_pretrained(lowercase_)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
_lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
self.assertSequenceEqual(lowercase_, lowercase_)
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[Any] = tokenizer_r.from_pretrained(lowercase_)
_lowerCAmelCase : str = tokenizer_p.from_pretrained(lowercase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_, lowercase_))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowercase_)
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
_lowerCAmelCase : List[Any] = tokenizer_r.save_pretrained(lowercase_, legacy_format=lowercase_)
_lowerCAmelCase : List[Any] = tokenizer_p.save_pretrained(lowercase_)
# Checks it save with the same files
self.assertSequenceEqual(lowercase_, lowercase_)
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(lowercase_)
_lowerCAmelCase : Union[str, Any] = tokenizer_p.from_pretrained(lowercase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_, lowercase_))
shutil.rmtree(lowercase_)
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(lowercase_, legacy_format=lowercase_)
_lowerCAmelCase : List[str] = tokenizer_p.save_pretrained(lowercase_)
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
_lowerCAmelCase : Tuple = tokenizer_r.from_pretrained(lowercase_)
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(lowercase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_, lowercase_))
shutil.rmtree(lowercase_)
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = 'facebook/mbart-large-50-one-to-many-mmt'
lowerCamelCase__ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowerCamelCase__ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowerCamelCase__ = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
_lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
_lowerCAmelCase : Dict = 1
return cls
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 25_0001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 25_0004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 25_0020)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 25_0038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [25_0053, 25_0001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[25_0004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 25_0001,
            },
        )
| 704 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown on a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
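# Note: `anchors[2]` is the third link in Google Scholar's "gs_fl" footer row,
# which at the time of writing is the "Cited by N" link; the selector is
# brittle and will need updating if the page markup changes. Scholar also
# rate-limits automated requests, so expect occasional empty responses.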
| 658 | 0 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
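# Minimal usage sketch (illustrative, not part of the original module):
# config = MaskFormerConfig()        # Swin backbone + DETR decoder defaults
# config.decoder_config.model_type   # -> "detr"
# config.num_hidden_layers           # mirrors the DETR decoder's layer count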
| 705 |
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below `limit` that produces the longest
    Collatz chain, memoizing chain lengths as they are discovered."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
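# Worked example: solution(30) returns 27. The chain starting at 27
# (27 -> 82 -> 41 -> ... -> 1) has 112 terms, the longest of any start below 30.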
| 658 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image,
        mask_image,
        num_inference_steps=250,
        eta=0.0,
        jump_length=10,
        jump_n_sample=10,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
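# Minimal usage sketch (illustrative; the checkpoint name is an assumption --
# any DDPM-style UNet paired with a RePaintScheduler should work):
# from diffusers import RePaintPipeline, RePaintScheduler
# scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
# pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
# result = pipe(image=original_image, mask_image=mask, num_inference_steps=250)
# result.images[0].save("inpainted.png")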
| 706 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
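# Example invocation (assuming this script is saved as convert_jukebox.py):
# python convert_jukebox.py --model_name jukebox-1b-lyrics \
#     --pytorch_dump_folder_path jukebox-1b-lyrics-converted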
| 658 | 0 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
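# Minimal usage sketch (illustrative): composing a ViT encoder with a GPT-2
# decoder through the classmethod defined above.
# from transformers import ViTConfig, GPT2Config
# config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
#     ViTConfig(), GPT2Config()
# )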
| 707 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """Build a (ksize x ksize) Gabor kernel."""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
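# Example: even kernel sizes are bumped to the next odd size so the filter has
# a well-defined centre pixel.
# >>> gabor_filter_kernel(4, 8, 0, 10, 0, 0).shape
# (5, 5)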
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread("../image_data/lena.jpg")
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case = out / out.max() * 255
_snake_case = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 658 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
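    # A minimal sketch of the two compositions left as placeholders above,
    # assuming skfuzzy's relation helpers (maxmin_composition / maxprod_composition);
    # the 1-D sets are lifted to 2-D relations first:
    # R1 = np.atleast_2d(young)          # shape (1, 75)
    # R2 = np.atleast_2d(middle_aged).T  # shape (75, 1)
    # max_min = fuzz.maxmin_composition(R1, R2)
    # max_product = fuzz.maxprod_composition(R1, R2)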
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 708 |
def binary_insertion_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using binary insertion sort."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
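# Example: binary_insertion_sort([5, 2, 4, 1, 3]) returns [1, 2, 3, 4, 5].
# The binary search locates each insertion index in O(log i) comparisons,
# though shifting elements keeps the overall worst case at O(n^2).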
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 658 | 0 |
_snake_case = "Tobias Carryer"
from time import time
class UpperCAmelCase_ :
def __init__( self, __a, __a, __a, __a=int(time())): # noqa: B008
'''simple docstring'''
_lowerCAmelCase : List[str] = multiplier
_lowerCAmelCase : Dict = increment
_lowerCAmelCase : Optional[Any] = modulo
_lowerCAmelCase : List[Any] = seed
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
_snake_case = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
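# Determinism check: with the classic parameters above (multiplier 1664525,
# increment 1013904223, modulo 2 << 31 == 2**32), a seed of 0 always produces
# 1013904223 first, since (1664525 * 0 + increment) % 2**32 == increment.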
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
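# Minimal usage sketch (illustrative): with the default depths [2, 2, 6, 2]
# there are four stages, so FocalNetConfig().stage_names evaluates to
# ["stem", "stage1", "stage2", "stage3", "stage4"], and out_features can name
# any subset of them, e.g. FocalNetConfig(out_features=["stage2", "stage4"]).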
| 658 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_0004
RO_CODE = 25_0020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = MBartTokenizer(lowerCamelCase_, keep_accents=lowerCamelCase_)
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCamelCase_, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCamelCase_, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
_lowerCAmelCase : Any = tokenizer.convert_tokens_to_ids(lowerCamelCase_)
self.assertListEqual(
lowerCamelCase_, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
_lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase_)
self.assertListEqual(
lowerCamelCase_, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
def snake_case__ ( self):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCAmelCase : Tuple = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
_lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_, **lowerCamelCase_)
_lowerCAmelCase : Any = self.tokenizer_class.from_pretrained(lowerCamelCase_, **lowerCamelCase_)
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : Any = tokenizer_r.save_pretrained(lowerCamelCase_)
_lowerCAmelCase : Optional[int] = tokenizer_p.save_pretrained(lowerCamelCase_)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
_lowerCAmelCase : Union[str, Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
self.assertSequenceEqual(lowerCamelCase_, lowerCamelCase_)
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[Any] = tokenizer_r.from_pretrained(lowerCamelCase_)
_lowerCAmelCase : List[Any] = tokenizer_p.from_pretrained(lowerCamelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_, lowerCamelCase_))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCamelCase_)
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : List[str] = tokenizer_r.save_pretrained(lowerCamelCase_, legacy_format=lowerCamelCase_)
_lowerCAmelCase : Union[str, Any] = tokenizer_p.save_pretrained(lowerCamelCase_)
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase_, lowerCamelCase_)
# Checks everything loads correctly in the same way
_lowerCAmelCase : str = tokenizer_r.from_pretrained(lowerCamelCase_)
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(lowerCamelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_, lowerCamelCase_))
shutil.rmtree(lowerCamelCase_)
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Dict = tempfile.mkdtemp()
_lowerCAmelCase : List[str] = tokenizer_r.save_pretrained(lowerCamelCase_, legacy_format=lowerCamelCase_)
_lowerCAmelCase : Dict = tokenizer_p.save_pretrained(lowerCamelCase_)
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
_lowerCAmelCase : Optional[int] = tokenizer_r.from_pretrained(lowerCamelCase_)
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(lowerCamelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase_, lowerCamelCase_))
shutil.rmtree(lowerCamelCase_)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 25_0001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 25_0004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 25_0020)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens, lowerCamelCase_)
def snake_case__ ( self):
'''simple docstring'''
self.assertIn(lowerCamelCase_, self.tokenizer.all_special_ids)
_lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
_lowerCAmelCase : List[str] = self.tokenizer.decode(lowerCamelCase_, skip_special_tokens=lowerCamelCase_)
_lowerCAmelCase : Any = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCamelCase_)
self.assertEqual(lowerCamelCase_, lowerCamelCase_)
self.assertNotIn(self.tokenizer.eos_token, lowerCamelCase_)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0], lowerCamelCase_)
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : List[Any] = self.tokenizer(lowerCamelCase_, max_length=lowerCamelCase_, truncation=lowerCamelCase_).input_ids[0]
self.assertEqual(ids[-2], 2)
self.assertEqual(ids[-1], lowerCamelCase_)
self.assertEqual(len(lowerCamelCase_), lowerCamelCase_)
def snake_case__ ( self):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [25_0026, 25_0001])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = tempfile.mkdtemp()
_lowerCAmelCase : int = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_)
_lowerCAmelCase : Dict = MBartTokenizer.from_pretrained(lowerCamelCase_)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCamelCase_)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCamelCase_, return_tensors="pt")
_lowerCAmelCase : Any = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=lowerCamelCase_, truncation=lowerCamelCase_, max_length=len(self.expected_src_tokens), return_tensors="pt", )
_lowerCAmelCase : Optional[int] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_)
self.assertEqual((2, 14), batch.input_ids.shape)
self.assertEqual((2, 14), batch.attention_mask.shape)
_lowerCAmelCase : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, lowerCamelCase_)
self.assertEqual(2, batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [])
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.tokenizer(self.src_text, padding=lowerCamelCase_, truncation=lowerCamelCase_, max_length=3, return_tensors="pt")
_lowerCAmelCase : List[Any] = self.tokenizer(
text_target=self.tgt_text, padding=lowerCamelCase_, truncation=lowerCamelCase_, max_length=10, return_tensors="pt")
_lowerCAmelCase : Any = targets["input_ids"]
_lowerCAmelCase : Any = shift_tokens_right(lowerCamelCase_, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1], 3)
self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.tokenizer._build_translation_inputs(
"A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
self.assertEqual(
nested_simplify(lowerCamelCase_), {
# A, test, EOS, en_XX
"input_ids": [[62, 3034, 2, 25_0004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_0001,
}, )
| 710 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations from `array` that sum to `target` (plain recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoized over the remaining target value."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, computed bottom-up over targets 1..target."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
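# Worked example: for array [1, 2, 5] and target 5 all three variants return 9,
# via the recurrence c(t) = c(t-1) + c(t-2) + c(t-5) with c(0) = 1:
# c(1)=1, c(2)=2, c(3)=3, c(4)=5, c(5) = 5 + 3 + 1 = 9.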
| 658 | 0 |
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Apply the Sherman-Morrison rank-1 update, treating `self` as A^(-1)."""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
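# The method above implements the Sherman-Morrison identity, with the matrix it
# is called on playing the role of A^(-1):
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# It refreshes a known inverse after a rank-1 update in O(n^2) arithmetic
# instead of re-inverting from scratch in O(n^3).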
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
| 711 |
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the message under every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
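# Example: feeding in the ciphertext "KHOOR" prints all 26 candidate shifts;
# the line for key #3 reads "HELLO", revealing the original key.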
| 658 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( __UpperCAmelCase):
lowerCamelCase__ = ['pixel_values']
def __init__( self, __a = True, __a = None, __a = PILImageResampling.BILINEAR, __a = True, __a = 1 / 255, __a = True, __a = None, __a = True, **__a, ):
'''simple docstring'''
super().__init__(**lowerCAmelCase_)
_lowerCAmelCase : Any = size if size is not None else {"shortest_edge": 224}
_lowerCAmelCase : int = get_size_dict(lowerCAmelCase_, default_to_square=lowerCAmelCase_)
_lowerCAmelCase : List[str] = crop_size if crop_size is not None else {"height": 256, "width": 256}
_lowerCAmelCase : Optional[Any] = get_size_dict(lowerCAmelCase_, param_name="crop_size")
_lowerCAmelCase : Tuple = do_resize
_lowerCAmelCase : Optional[Any] = size
_lowerCAmelCase : str = resample
_lowerCAmelCase : List[Any] = do_rescale
_lowerCAmelCase : Dict = rescale_factor
_lowerCAmelCase : str = do_center_crop
_lowerCAmelCase : int = crop_size
_lowerCAmelCase : str = do_flip_channel_order
def snake_case__ ( self, __a, __a, __a = PIL.Image.BILINEAR, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = get_size_dict(lowerCAmelCase_, default_to_square=lowerCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
_lowerCAmelCase : str = get_resize_output_image_size(lowerCAmelCase_, size=size["shortest_edge"], default_to_square=lowerCAmelCase_)
return resize(lowerCAmelCase_, size=lowerCAmelCase_, resample=lowerCAmelCase_, data_format=lowerCAmelCase_, **lowerCAmelCase_)
def snake_case__ ( self, __a, __a, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = get_size_dict(lowerCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(lowerCAmelCase_, size=(size["height"], size["width"]), data_format=lowerCAmelCase_, **lowerCAmelCase_)
def snake_case__ ( self, __a, __a, __a = None, **__a, ):
'''simple docstring'''
return rescale(lowerCAmelCase_, scale=lowerCAmelCase_, data_format=lowerCAmelCase_, **lowerCAmelCase_)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
return flip_channel_order(lowerCAmelCase_, data_format=lowerCAmelCase_)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = ChannelDimension.FIRST, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : List[str] = resample if resample is not None else self.resample
_lowerCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : int = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_lowerCAmelCase : Union[str, Any] = size if size is not None else self.size
_lowerCAmelCase : Any = get_size_dict(lowerCAmelCase_, default_to_square=lowerCAmelCase_)
_lowerCAmelCase : Tuple = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : Union[str, Any] = get_size_dict(lowerCAmelCase_, param_name="crop_size")
_lowerCAmelCase : Optional[int] = make_list_of_images(lowerCAmelCase_)
if not valid_images(lowerCAmelCase_):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
# All transformations expect numpy arrays.
_lowerCAmelCase : List[str] = [to_numpy_array(lowerCAmelCase_) for image in images]
if do_resize:
_lowerCAmelCase : int = [self.resize(image=lowerCAmelCase_, size=lowerCAmelCase_, resample=lowerCAmelCase_) for image in images]
if do_center_crop:
_lowerCAmelCase : List[Any] = [self.center_crop(image=lowerCAmelCase_, size=lowerCAmelCase_) for image in images]
if do_rescale:
_lowerCAmelCase : str = [self.rescale(image=lowerCAmelCase_, scale=lowerCAmelCase_) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_lowerCAmelCase : Union[str, Any] = [self.flip_channel_order(image=lowerCAmelCase_) for image in images]
_lowerCAmelCase : Any = [to_channel_dimension_format(lowerCAmelCase_, lowerCAmelCase_) for image in images]
_lowerCAmelCase : List[str] = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase_, tensor_type=lowerCAmelCase_)
    def post_process_semantic_segmentation( self, outputs, target_sizes = None):
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_segmentation.append(resized_logits[0].argmax(dim=0))
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
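# A standalone sketch of the post-processing step above: per-image bilinear resizing
# of the logits followed by a channel-wise argmax. All shapes and sizes here are
# illustrative assumptions, not values taken from this file.
if __name__ == "__main__":
    import torch

    demo_logits = torch.randn(2, 19, 32, 32)       # (batch, num_labels, height, width)
    demo_target_sizes = [(64, 96), (48, 80)]       # one (height, width) per image
    demo_maps = []
    for idx in range(len(demo_target_sizes)):
        resized = torch.nn.functional.interpolate(
            demo_logits[idx].unsqueeze(dim=0), size=demo_target_sizes[idx], mode="bilinear", align_corners=False)
        demo_maps.append(resized[0].argmax(dim=0))  # (height, width) label map
    assert demo_maps[0].shape == (64, 96)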
| 712 |
import requests
from bs4 import BeautifulSoup
def world_covid19_stats ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
    keys = soup.findAll("h1" )
    values = soup.findAll("div" , {"class": "maincounter-number"} )
    keys += soup.findAll("span" , {"class": "panel-title"} )
    values += soup.findAll("div" , {"class": "number-table-main"} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
_snake_case = ["bert-base-uncased", "bert-base-cased"]
_snake_case = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
class UpperCAmelCase_ ( tf.keras.Model):
def __init__( self, __a):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = tokenizer
_lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_UpperCAmelCase)
_lowerCAmelCase : str = TFAutoModel.from_config(_UpperCAmelCase)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.tokenizer(_UpperCAmelCase)
_lowerCAmelCase : int = self.bert(**_UpperCAmelCase)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : Any = [
BertTokenizer.from_pretrained(_UpperCAmelCase) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_lowerCAmelCase : Union[str, Any] = [TFBertTokenizer.from_pretrained(_UpperCAmelCase) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(_UpperCAmelCase, use_fast_bert_tokenizer=_UpperCAmelCase)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_lowerCAmelCase : Tuple = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
_lowerCAmelCase : Any = list(zip(self.test_sentences, self.test_sentences[::-1]))
def snake_case__ ( self):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCAmelCase : List[Any] = tokenizer(_UpperCAmelCase, return_tensors="tf", padding="longest")
_lowerCAmelCase : List[str] = tf_tokenizer(_UpperCAmelCase)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.intaa) == tf_outputs[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Dict = tf_tokenizer(self.paired_sentences)
_lowerCAmelCase : str = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences], )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.intaa) == separated_outputs[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Tuple = tf.function(_UpperCAmelCase)
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCAmelCase : List[str] = tf.constant(_UpperCAmelCase)
_lowerCAmelCase : Tuple = compiled_tokenizer(_UpperCAmelCase)
_lowerCAmelCase : Union[str, Any] = tf_tokenizer(_UpperCAmelCase)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Dict = ModelToSave(tokenizer=_UpperCAmelCase)
_lowerCAmelCase : Tuple = tf.convert_to_tensor(self.test_sentences)
_lowerCAmelCase : List[str] = model(_UpperCAmelCase) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCAmelCase : Tuple = Path(_UpperCAmelCase) / '''saved.model'''
model.save(_UpperCAmelCase)
_lowerCAmelCase : Tuple = tf.keras.models.load_model(_UpperCAmelCase)
_lowerCAmelCase : int = loaded_model(_UpperCAmelCase)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1E-5)
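# Usage sketch for the in-graph tokenizer exercised by the tests above. This is a
# hedged illustration: it assumes tensorflow_text is installed and that network
# access is available to download the "bert-base-uncased" checkpoint.
#
#     tok = TFBertTokenizer.from_pretrained("bert-base-uncased")
#     batch = tf.constant(["hello world", "a second sentence"])
#     encoded = tok(batch)   # dict of dense int tensors, usable inside tf.function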
| 713 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial :
    def __init__( self, degree: int, coefficients: MutableSequence[float]) -> None:
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")
        self.coefficients : list[float] = list(coefficients)
        self.degree = degree
    def __add__( self, polynomial_a):
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)
    def __sub__( self, polynomial_a):
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0, [-1])
    def __neg__( self):
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__( self, polynomial_a):
        '''simple docstring'''
        coefficients : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate( self, substitution):
        '''simple docstring'''
        result : int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self):
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__( self):
        '''simple docstring'''
        return self.__str__()
    def derivative( self):
        '''simple docstring'''
        coefficients : list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral( self, constant = 0):
        '''simple docstring'''
        coefficients : list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__( self, polynomial_a):
        '''simple docstring'''
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self, polynomial_a):
        '''simple docstring'''
        return not self.__eq__(polynomial_a)
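if __name__ == "__main__":
    # A minimal usage sketch for the class above; coefficients are ordered from the
    # x^0 term upward, so [1.0, 2.0, 3.0] is 3x^2 + 2x + 1.
    p = Polynomial(2, [1.0, 2.0, 3.0])
    q = Polynomial(1, [0.0, 1.0])   # x
    print(p + q)             # 3.0x^2 + 3.0x + 1.0
    print(p * q)             # 3.0x^3 + 2.0x^2 + 1.0x
    print(p.derivative())    # 6.0x + 2.0
    print(p.integral())      # 1.0x^3 + 1.0x^2 + 1.0x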
| 658 | 0 |
import torch
from diffusers import DiffusionPipeline
class UpperCAmelCase_ ( _a):
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A, scheduler=_A)
def __call__( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), )
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : str = self.unet(_A, _A).sample
_lowerCAmelCase : Optional[int] = self.scheduler.step(_A, _A, _A).prev_sample
_lowerCAmelCase : Tuple = scheduler_output - scheduler_output + torch.ones_like(_A)
return result
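# Hedged usage sketch for the one-step pipeline above. The checkpoint id is an
# illustrative choice, not taken from this file; any UNet2DModel/scheduler pair
# with a matching sample size would do.
#
#     from diffusers import UNet2DModel, DDPMScheduler
#     unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#     pipe = UpperCAmelCase_(unet, DDPMScheduler())   # class name as defined above
#     result = pipe()   # a tensor of ones with the shape of one sample, per the body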
| 714 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=3_2000, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=True, __a="bi", __a=0.02, __a=1E-12, __a=0.1, __a=512, __a=None, __a=True, __a=False, __a=False, __a=-1, __a=False, __a="last", __a=True, __a="tanh", __a=0.1, __a=5, __a=5, __a=5, __a=1, __a=2, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Tuple = n_layer
_lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
_lowerCAmelCase : Optional[int] = d_model // n_head
_lowerCAmelCase : List[str] = ff_activation
_lowerCAmelCase : Tuple = d_inner
_lowerCAmelCase : List[Any] = untie_r
_lowerCAmelCase : List[str] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = dropout
_lowerCAmelCase : Optional[int] = mem_len
_lowerCAmelCase : Union[str, Any] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : List[str] = clamp_len
_lowerCAmelCase : Any = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : Optional[Any] = summary_activation
_lowerCAmelCase : Tuple = summary_last_dropout
_lowerCAmelCase : Union[str, Any] = start_n_top
_lowerCAmelCase : Optional[int] = end_n_top
_lowerCAmelCase : Tuple = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def snake_case__ ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 0 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
_snake_case = 4
_snake_case = 3
class UpperCAmelCase_ ( a):
pass
def A ( _lowerCamelCase ):
'''simple docstring'''
for shard in shards:
for i in range(UpperCAmelCase__ ):
yield {"i": i, "shard": shard}
def main ( ):
'''simple docstring'''
_lowerCAmelCase : Any = int(os.environ["RANK"] )
_lowerCAmelCase : int = int(os.environ["WORLD_SIZE"] )
_lowerCAmelCase : List[str] = ArgumentParser()
parser.add_argument("--streaming" , type=UpperCAmelCase__ )
parser.add_argument("--local_rank" , type=UpperCAmelCase__ )
parser.add_argument("--num_workers" , type=UpperCAmelCase__ , default=0 )
_lowerCAmelCase : int = parser.parse_args()
_lowerCAmelCase : str = args.streaming
_lowerCAmelCase : Optional[Any] = args.num_workers
_lowerCAmelCase : int = {"shards": [F"shard_{shard_idx}" for shard_idx in range(UpperCAmelCase__ )]}
_lowerCAmelCase : List[Any] = IterableDataset.from_generator(UpperCAmelCase__ , gen_kwargs=UpperCAmelCase__ )
if not streaming:
_lowerCAmelCase : List[str] = Dataset.from_list(list(UpperCAmelCase__ ) )
_lowerCAmelCase : int = split_dataset_by_node(UpperCAmelCase__ , rank=UpperCAmelCase__ , world_size=UpperCAmelCase__ )
_lowerCAmelCase : List[str] = torch.utils.data.DataLoader(UpperCAmelCase__ , num_workers=UpperCAmelCase__ )
_lowerCAmelCase : Tuple = NUM_SHARDS * NUM_ITEMS_PER_SHARD
_lowerCAmelCase : Dict = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
_lowerCAmelCase : List[str] = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F"local_size {local_size} != expected_local_size {expected_local_size}" )
if __name__ == "__main__":
main()
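# Launch sketch (an assumption about how this script is meant to be driven):
# `torchrun` sets the RANK and WORLD_SIZE environment variables read in main().
#
#     torchrun --nproc_per_node=2 this_script.py --streaming True --num_workers 2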
| 715 |
def price_plus_tax ( price , tax_rate ):
    '''simple docstring'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 0 |
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ ( a):
lowerCamelCase__ = """SpeechT5FeatureExtractor"""
lowerCamelCase__ = """SpeechT5Tokenizer"""
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__(__a, __a)
def __call__( self, *__a, **__a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = kwargs.pop("audio", __a)
_lowerCAmelCase : Any = kwargs.pop("text", __a)
_lowerCAmelCase : Any = kwargs.pop("text_target", __a)
_lowerCAmelCase : Optional[int] = kwargs.pop("audio_target", __a)
_lowerCAmelCase : Tuple = kwargs.pop("sampling_rate", __a)
if audio is not None and text is not None:
raise ValueError(
"Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
if audio_target is not None and text_target is not None:
raise ValueError(
"Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")
if audio is not None:
_lowerCAmelCase : str = self.feature_extractor(__a, *__a, sampling_rate=__a, **__a)
elif text is not None:
_lowerCAmelCase : int = self.tokenizer(__a, **__a)
else:
_lowerCAmelCase : List[Any] = None
if audio_target is not None:
_lowerCAmelCase : List[str] = self.feature_extractor(audio_target=__a, *__a, sampling_rate=__a, **__a)
_lowerCAmelCase : Union[str, Any] = targets["input_values"]
elif text_target is not None:
_lowerCAmelCase : str = self.tokenizer(__a, **__a)
_lowerCAmelCase : Any = targets["input_ids"]
else:
_lowerCAmelCase : Dict = None
if inputs is None:
return targets
if targets is not None:
_lowerCAmelCase : str = labels
_lowerCAmelCase : Union[str, Any] = targets.get("attention_mask")
if decoder_attention_mask is not None:
_lowerCAmelCase : Optional[Any] = decoder_attention_mask
return inputs
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
_lowerCAmelCase : Tuple = kwargs.pop("input_values", __a)
_lowerCAmelCase : int = kwargs.pop("input_ids", __a)
_lowerCAmelCase : Optional[Any] = kwargs.pop("labels", __a)
if input_values is not None and input_ids is not None:
raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")
if input_values is not None:
_lowerCAmelCase : str = self.feature_extractor.pad(__a, *__a, **__a)
elif input_ids is not None:
_lowerCAmelCase : int = self.tokenizer.pad(__a, **__a)
else:
_lowerCAmelCase : List[str] = None
if labels is not None:
if "input_ids" in labels or (isinstance(__a, __a) and "input_ids" in labels[0]):
_lowerCAmelCase : List[Any] = self.tokenizer.pad(__a, **__a)
_lowerCAmelCase : int = targets["input_ids"]
else:
_lowerCAmelCase : Any = self.feature_extractor.feature_size
_lowerCAmelCase : Any = self.feature_extractor.num_mel_bins
_lowerCAmelCase : str = self.feature_extractor.pad(__a, *__a, **__a)
_lowerCAmelCase : List[Any] = feature_size_hack
_lowerCAmelCase : Optional[int] = targets["input_values"]
else:
_lowerCAmelCase : int = None
if inputs is None:
return targets
if targets is not None:
_lowerCAmelCase : int = labels
_lowerCAmelCase : List[str] = targets.get("attention_mask")
if decoder_attention_mask is not None:
_lowerCAmelCase : Union[str, Any] = decoder_attention_mask
return inputs
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
return self.tokenizer.batch_decode(*__a, **__a)
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
return self.tokenizer.decode(*__a, **__a)
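# Minimal sketch of the two call paths dispatched above (the checkpoint id and the
# `waveform` variable are illustrative assumptions):
#
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     text_batch = processor(text="hello world", return_tensors="pt")      # tokenizer path
#     tts_batch = processor(text="hello world", audio_target=waveform,
#                           sampling_rate=16000, return_tensors="pt")      # also builds labels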
| 716 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
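# Minimal sketch of the backbone resolution above: passing nothing selects a default
# ResNet backbone config with four output stages, while a dict is re-hydrated
# through CONFIG_MAPPING.
#
#     config = UpperCAmelCase_()   # class name as defined above
#     # config.backbone_config.out_features == ["stage1", "stage2", "stage3", "stage4"]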
| 658 | 0 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
_snake_case = logging.getLogger(__name__)
class UpperCAmelCase_ ( __UpperCAmelCase):
def snake_case__ ( self, __a, __a, __a=None, __a=None):
'''simple docstring'''
_lowerCAmelCase : Dict = self.layer[current_layer](__a, __a, head_mask[current_layer])
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , __UpperCAmelCase , )
class UpperCAmelCase_ ( __UpperCAmelCase):
def __init__( self, __a):
'''simple docstring'''
super().__init__(__a)
_lowerCAmelCase : Tuple = BertEncoderWithPabee(__a)
self.init_weights()
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : str = 0
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : str = 0
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = threshold
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = patience
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Optional[Any] = 0
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.inference_layers_num / self.inference_instances_num
_lowerCAmelCase : List[Any] = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(__a)
@add_start_docstrings_to_model_forward(__a)
def snake_case__ ( self, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=False, ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
_lowerCAmelCase : Tuple = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : Tuple = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
_lowerCAmelCase : Optional[int] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : str = torch.ones(__a, device=__a)
if token_type_ids is None:
_lowerCAmelCase : Optional[int] = torch.zeros(__a, dtype=torch.long, device=__a)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : Optional[int] = self.get_extended_attention_mask(__a, __a, __a)
        # If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = encoder_hidden_states.size()
_lowerCAmelCase : Optional[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_lowerCAmelCase : Any = torch.ones(__a, device=__a)
_lowerCAmelCase : Union[str, Any] = self.invert_attention_mask(__a)
else:
_lowerCAmelCase : Optional[int] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(__a, self.config.num_hidden_layers)
_lowerCAmelCase : str = self.embeddings(
input_ids=__a, position_ids=__a, token_type_ids=__a, inputs_embeds=__a)
_lowerCAmelCase : Union[str, Any] = embedding_output
if self.training:
_lowerCAmelCase : Dict = []
for i in range(self.config.num_hidden_layers):
_lowerCAmelCase : str = self.encoder.adaptive_forward(
__a, current_layer=__a, attention_mask=__a, head_mask=__a)
_lowerCAmelCase : Dict = self.pooler(__a)
_lowerCAmelCase : Tuple = output_layers[i](output_dropout(__a))
res.append(__a)
elif self.patience == 0: # Use all layers for inference
_lowerCAmelCase : List[str] = self.encoder(
__a, attention_mask=__a, head_mask=__a, encoder_hidden_states=__a, encoder_attention_mask=__a, )
_lowerCAmelCase : Union[str, Any] = self.pooler(encoder_outputs[0])
_lowerCAmelCase : int = [output_layers[self.config.num_hidden_layers - 1](__a)]
else:
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : List[Any] = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
_lowerCAmelCase : List[Any] = self.encoder.adaptive_forward(
__a, current_layer=__a, attention_mask=__a, head_mask=__a)
_lowerCAmelCase : int = self.pooler(__a)
_lowerCAmelCase : Optional[Any] = output_layers[i](__a)
if regression:
_lowerCAmelCase : List[Any] = logits.detach()
if patient_result is not None:
_lowerCAmelCase : str = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
_lowerCAmelCase : Union[str, Any] = 0
else:
_lowerCAmelCase : Any = logits.detach().argmax(dim=1)
if patient_result is not None:
_lowerCAmelCase : Tuple = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(__a)):
patient_counter += 1
else:
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Any = logits
if patient_counter == self.patience:
break
_lowerCAmelCase : Tuple = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , __UpperCAmelCase , )
class UpperCAmelCase_ ( __UpperCAmelCase):
def __init__( self, __a):
'''simple docstring'''
super().__init__(__a)
_lowerCAmelCase : Any = config.num_labels
_lowerCAmelCase : Optional[Any] = BertModelWithPabee(__a)
_lowerCAmelCase : int = nn.Dropout(config.hidden_dropout_prob)
_lowerCAmelCase : List[Any] = nn.ModuleList(
[nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)])
self.init_weights()
@add_start_docstrings_to_model_forward(__a)
def snake_case__ ( self, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.bert(
input_ids=__a, attention_mask=__a, token_type_ids=__a, position_ids=__a, head_mask=__a, inputs_embeds=__a, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, )
_lowerCAmelCase : List[str] = (logits[-1],)
if labels is not None:
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : List[str] = 0
for ix, logits_item in enumerate(__a):
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : str = MSELoss()
_lowerCAmelCase : Tuple = loss_fct(logits_item.view(-1), labels.view(-1))
else:
_lowerCAmelCase : int = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
if total_loss is None:
_lowerCAmelCase : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_lowerCAmelCase : Tuple = (total_loss / total_weights,) + outputs
return outputs
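# Hedged usage sketch for the patience-based early exit implemented above. The
# method names were mangled to `snake_case__`; judging by their bodies, upstream
# they set the patience, reset the layer statistics, and print the speed-up.
#
#     model = BertForSequenceClassificationWithPabee.from_pretrained(   # assumed upstream name
#         "bert-base-uncased", num_labels=2)
#     model.bert.set_patience(3)   # assumed name of the one-line patience setter
#     outputs = model(input_ids=batch["input_ids"])
#     model.bert.log_stats()       # assumed name of the printing method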
| 717 |
import base64
def base85_encode ( string ):
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8" ) )
def base85_decode ( _lowerCamelCase ):
    '''simple docstring'''
    return base64.a85decode(_lowerCamelCase ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
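# Round-trip sketch for the two helpers above (Ascii85 via the standard library):
#
#     >>> base85_decode(base85_encode("base 85")) == "base 85"
#     True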
| 658 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( __lowerCAmelCase , unittest.TestCase):
lowerCamelCase__ = XGLMTokenizer
lowerCamelCase__ = XGLMTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : Dict = XGLMTokenizer(lowerCamelCase__, keep_accents=lowerCamelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = '''<pad>'''
_lowerCAmelCase : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__), lowerCamelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__), lowerCamelCase__)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<s>")
self.assertEqual(vocab_keys[1], "<pad>")
self.assertEqual(len(lowerCamelCase__), 1008)
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1008)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = XGLMTokenizer(lowerCamelCase__, keep_accents=lowerCamelCase__)
_lowerCAmelCase : int = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCamelCase__, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
_lowerCAmelCase : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCamelCase__, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
_lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(lowerCamelCase__)
self.assertListEqual(
lowerCamelCase__, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(lowerCamelCase__)
self.assertListEqual(
lowerCamelCase__, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
def snake_case__ ( self):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCamelCase__, f.name)
_lowerCAmelCase : Optional[int] = XGLMTokenizer(f.name, keep_accents=lowerCamelCase__)
_lowerCAmelCase : Dict = pickle.dumps(lowerCamelCase__)
pickle.loads(lowerCamelCase__)
def snake_case__ ( self):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = self.get_rust_tokenizer()
_lowerCAmelCase : Tuple = '''I was born in 92000, and this is falsé.'''
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(lowerCamelCase__)
_lowerCAmelCase : Optional[Any] = rust_tokenizer.tokenize(lowerCamelCase__)
self.assertListEqual(lowerCamelCase__, lowerCamelCase__)
_lowerCAmelCase : Dict = tokenizer.encode(lowerCamelCase__, add_special_tokens=lowerCamelCase__)
_lowerCAmelCase : List[str] = rust_tokenizer.encode(lowerCamelCase__, add_special_tokens=lowerCamelCase__)
self.assertListEqual(lowerCamelCase__, lowerCamelCase__)
_lowerCAmelCase : Any = self.get_rust_tokenizer()
_lowerCAmelCase : str = tokenizer.encode(lowerCamelCase__)
_lowerCAmelCase : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__)
self.assertListEqual(lowerCamelCase__, lowerCamelCase__)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = '''Hello World!'''
_lowerCAmelCase : Union[str, Any] = [2, 3_1227, 4447, 35]
self.assertListEqual(lowerCamelCase__, self.big_tokenizer.encode(lowerCamelCase__))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
_lowerCAmelCase : int = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCamelCase__, self.big_tokenizer.encode(lowerCamelCase__))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = {
'''input_ids''': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__, model_name="facebook/xglm-564M", padding=lowerCamelCase__, )
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
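# Hedged export sketch: the OnnxConfig above declares a single dynamic 4-D
# "pixel_values" input, so a dummy batch matching the config defaults
# (num_channels=3, image_size=224) is enough to trace the model.
#
#     import torch
#     dummy = torch.randn(1, 3, 224, 224)
#     # torch.onnx.export(model, dummy, "data2vec_vision.onnx")   # `model` is assumed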
| 658 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_snake_case = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_snake_case = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_snake_case = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), )
def snake_case__ ( self, __a, __a, __a = 1, __a = 4, ):
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__a, hypotheses=__a, min_len=__a, max_len=__a)
}
| 719 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch ( tf_checkpoint_path , bert_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    '''simple docstring'''
    config = XLNetConfig.from_json_file(bert_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {os.path.abspath(pytorch_config_dump_path )}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
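# Illustrative invocation (all paths are placeholders, not values from this file):
#
#     python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./xlnet_model.ckpt \
#         --xlnet_config_file ./xlnet_config.json \
#         --pytorch_dump_folder_path ./xlnet-pytorch \
#         --finetuning_task sts-b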
| 658 | 0 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_snake_case = 0b101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_snake_case = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Dict = WATERMARK_BITS
_lowerCAmelCase : Optional[Any] = WatermarkEncoder()
self.encoder.set_watermark("bits", self.watermark)
def snake_case__ ( self, __a):
'''simple docstring'''
if images.shape[-1] < 256:
return images
_lowerCAmelCase : int = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
_lowerCAmelCase : Any = [self.encoder.encode(__a, "dwtDct") for image in images]
_lowerCAmelCase : Tuple = torch.from_numpy(np.array(__a)).permute(0, 3, 1, 2)
_lowerCAmelCase : List[Any] = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
return images
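# Minimal sketch for the watermarker above: inputs are (batch, channels, h, w)
# tensors in [-1, 1]; images narrower than 256 px pass through unchanged.
#
#     wm = UpperCAmelCase_()                   # class name as defined above
#     images = torch.zeros(1, 3, 512, 512)
#     marked = wm.snake_case__(images)         # the single (mangled-name) method above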
| 720 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 658 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_snake_case = ["gpt2"]
_snake_case = "gpt2"
if is_tf_available():
class UpperCAmelCase_ ( tf.Module):
def __init__( self, __a):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Dict = tokenizer
_lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(__a)
_lowerCAmelCase : Union[str, Any] = TFGPTaLMHeadModel.from_config(__a)
@tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(__a)
_lowerCAmelCase : List[Any] = tokenized["input_ids"].to_tensor()
_lowerCAmelCase : Any = tf.cast(input_ids_dense > 0, tf.intaa)
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_lowerCAmelCase : int = self.model(input_ids=__a, attention_mask=__a)["logits"]
return outputs
@require_tf
@require_keras_nlp
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : List[Any] = [GPTaTokenizer.from_pretrained(__a) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_lowerCAmelCase : Any = [TFGPTaTokenizer.from_pretrained(__a) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_lowerCAmelCase : Optional[Any] = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we\'re going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
_lowerCAmelCase : Tuple = list(zip(self.test_sentences, self.test_sentences[::-1]))
def snake_case__ ( self):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
for test_inputs in self.test_sentences:
_lowerCAmelCase : Dict = tokenizer([test_inputs], return_tensors="tf")
_lowerCAmelCase : Union[str, Any] = tf_tokenizer([test_inputs])
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_lowerCAmelCase : str = python_outputs[key].numpy()
_lowerCAmelCase : Optional[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Tuple = tf.function(__a)
for test_inputs in self.test_sentences:
_lowerCAmelCase : Dict = tf.constant(__a)
_lowerCAmelCase : List[Any] = compiled_tokenizer(__a)
_lowerCAmelCase : Any = tf_tokenizer(__a)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : List[Any] = ModelToSave(tokenizer=__a)
_lowerCAmelCase : Optional[int] = tf.convert_to_tensor([self.test_sentences[0]])
_lowerCAmelCase : Optional[int] = model.serving(__a) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCAmelCase : Any = Path(__a) / "saved.model"
tf.saved_model.save(__a, __a, signatures={"serving_default": model.serving})
_lowerCAmelCase : List[Any] = tf.saved_model.load(__a)
_lowerCAmelCase : Any = loaded_model.signatures["serving_default"](__a)["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Any = tf.convert_to_tensor([self.test_sentences[0]])
_lowerCAmelCase : str = tf_tokenizer(__a) # Build model with some sample inputs
_lowerCAmelCase : Tuple = tf_tokenizer.get_config()
_lowerCAmelCase : Union[str, Any] = TFGPTaTokenizer.from_config(__a)
_lowerCAmelCase : Any = model_from_config(__a)
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
            tf_tokenizer.pad_token_id = 12_3123
for max_length in [3, 5, 1024]:
_lowerCAmelCase : int = tf.convert_to_tensor([self.test_sentences[0]])
_lowerCAmelCase : Optional[int] = tf_tokenizer(__a, max_length=__a)
_lowerCAmelCase : str = out["input_ids"].numpy().shape[1]
assert out_length == max_length
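# --- Illustrative sketch (assumptions: the export path is a placeholder and the
# "gpt2" checkpoint is reachable): exporting the in-graph tokenizer plus model as
# a TensorFlow SavedModel, the way the saved-model test above does.
def _example_saved_model_export(export_dir="./tf_gpt2_with_tokenizer"):
    tokenizer = TFGPTaTokenizer.from_pretrained("gpt2")
    model = ModelToSave(tokenizer=tokenizer)
    model.serving(tf.constant(["hello there"]))  # trace once so the signature is built
    tf.saved_model.save(model, export_dir, signatures={"serving_default": model.serving})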
| 721 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    '''simple docstring'''
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    '''simple docstring'''
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    '''simple docstring'''
    z , _ , _ = model.encode(x )
    print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str(string, reload=False):
    '''simple docstring'''
    module , cls = string.rsplit("." , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config(config):
    '''simple docstring'''
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate." )
    return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    '''simple docstring'''
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    '''simple docstring'''
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(F"loaded model from global step {global_step}." )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=gpu , eval_mode=eval_mode )["model"]
    return model, global_step
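# --- Illustrative usage sketch (assumption): reconstructing a batch with the
# helpers above; `x` must be a float tensor of shape (batch, 3, H, W) scaled the
# way the VQGAN was trained, and the default checkpoint files must exist. The
# random tensor below is only a stand-in for a real image batch.
def _example_vqgan_reconstruction():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = load_vqgan(device)  # falls back to the default config/checkpoint paths
    x = torch.randn(1, 3, 256, 256, device=device)
    return reconstruct_with_vqgan(x, model)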
| 658 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument("-f" )
_lowerCAmelCase : Optional[Any] = parser.parse_args()
return args.f
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , "all_results.json" )
if os.path.exists(_lowerCamelCase ):
with open(_lowerCamelCase , "r" ) as f:
_lowerCAmelCase : str = json.load(_lowerCamelCase )
else:
raise ValueError(F"can't find {path}" )
return results
def A ( ):
'''simple docstring'''
_lowerCAmelCase : str = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( TestCasePlus):
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
_lowerCAmelCase : str = tempfile.mkdtemp()
_lowerCAmelCase : int = os.path.join(cls.tmpdir, "default_config.yml")
write_basic_config(save_location=cls.configPath)
_lowerCAmelCase : int = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Union[str, Any] = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
run_command(self._launch_args + testargs)
_lowerCAmelCase : List[Any] = get_results(__a)
self.assertGreaterEqual(result["eval_accuracy"], 0.75)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "glue_no_trainer")))
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Optional[int] = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs)
_lowerCAmelCase : Optional[Any] = get_results(__a)
self.assertLess(result["perplexity"], 100)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "clm_no_trainer")))
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : List[str] = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : Union[str, Any] = get_results(__a)
self.assertLess(result["perplexity"], 42)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "mlm_no_trainer")))
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = 7 if get_gpu_count() > 1 else 2
_lowerCAmelCase : int = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Any = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : List[str] = get_results(__a)
self.assertGreaterEqual(result["eval_accuracy"], 0.75)
self.assertLess(result["train_loss"], 0.5)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "ner_no_trainer")))
@unittest.skip(reason="Fix me @muellerzr")
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Optional[int] = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : Optional[int] = get_results(__a)
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"], 28)
self.assertGreaterEqual(result["eval_exact"], 28)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "qa_no_trainer")))
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Tuple = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : Dict = get_results(__a)
self.assertGreaterEqual(result["eval_accuracy"], 0.8)
self.assertTrue(os.path.exists(os.path.join(__a, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Optional[Any] = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : List[str] = get_results(__a)
self.assertGreaterEqual(result["eval_rouge1"], 10)
self.assertGreaterEqual(result["eval_rouge2"], 2)
self.assertGreaterEqual(result["eval_rougeL"], 7)
self.assertGreaterEqual(result["eval_rougeLsum"], 7)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Tuple = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : List[str] = get_results(__a)
self.assertGreaterEqual(result["eval_bleu"], 30)
self.assertTrue(os.path.exists(os.path.join(__a, "epoch_0")))
self.assertTrue(os.path.exists(os.path.join(__a, "translation_no_trainer")))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(__a)
_lowerCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : int = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
run_command(self._launch_args + testargs)
_lowerCAmelCase : Union[str, Any] = get_results(__a)
self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : List[Any] = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
run_command(self._launch_args + testargs)
_lowerCAmelCase : int = get_results(__a)
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"], 0.6)
self.assertTrue(os.path.exists(os.path.join(__a, "step_1")))
self.assertTrue(os.path.exists(os.path.join(__a, "image_classification_no_trainer")))
| 700 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( PretrainedConfig):
lowerCamelCase__ = 'roc_bert'
    def __init__( self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=2_4858, concat_input=True, **kwargs, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
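# --- Illustrative sketch: round-tripping the config through its dict form, the
# same serialization that `save_pretrained`/`from_pretrained` rely on.
def _example_config_roundtrip():
    config = UpperCAmelCase_(vocab_size=1000, hidden_size=128)
    restored = UpperCAmelCase_.from_dict(config.to_dict())
    return restored.to_dict() == config.to_dict()  # True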
| 658 | 0 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( SchedulerCommonTest):
lowerCamelCase__ = (IPNDMScheduler,)
lowerCamelCase__ = (('num_inference_steps', 50),)
def snake_case__ ( self, **__a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {"num_train_timesteps": 1000}
config.update(**__a)
return config
    def snake_case__ ( self, time_step=0, **kwargs):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs)
        _lowerCAmelCase : str = kwargs.pop("num_inference_steps", None)
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Optional[int] = 0.1 * sample
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config(**__a)
_lowerCAmelCase : List[str] = scheduler_class(**__a)
scheduler.set_timesteps(__a)
# copy over dummy past residuals
_lowerCAmelCase : Any = dummy_past_residuals[:]
if time_step is None:
_lowerCAmelCase : int = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__a)
_lowerCAmelCase : int = scheduler_class.from_pretrained(__a)
new_scheduler.set_timesteps(__a)
# copy over dummy past residuals
_lowerCAmelCase : List[Any] = dummy_past_residuals[:]
_lowerCAmelCase : Any = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : Dict = new_scheduler.step(__a, __a, __a, **__a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
_lowerCAmelCase : List[Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : int = new_scheduler.step(__a, __a, __a, **__a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self):
'''simple docstring'''
pass
    def snake_case__ ( self, time_step=0, **kwargs):
'''simple docstring'''
_lowerCAmelCase : str = dict(self.forward_default_kwargs)
        _lowerCAmelCase : str = kwargs.pop("num_inference_steps", None)
_lowerCAmelCase : Tuple = self.dummy_sample
_lowerCAmelCase : str = 0.1 * sample
_lowerCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**__a)
scheduler.set_timesteps(__a)
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : Optional[int] = dummy_past_residuals[:]
if time_step is None:
_lowerCAmelCase : int = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__a)
_lowerCAmelCase : List[Any] = scheduler_class.from_pretrained(__a)
# copy over dummy past residuals
new_scheduler.set_timesteps(__a)
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[:]
_lowerCAmelCase : Dict = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : int = new_scheduler.step(__a, __a, __a, **__a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
_lowerCAmelCase : Optional[Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : Tuple = new_scheduler.step(__a, __a, __a, **__a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **__a):
'''simple docstring'''
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : Any = self.get_scheduler_config(**__a)
_lowerCAmelCase : Dict = scheduler_class(**__a)
_lowerCAmelCase : Dict = 10
_lowerCAmelCase : Optional[int] = self.dummy_model()
_lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(__a)
for i, t in enumerate(scheduler.timesteps):
_lowerCAmelCase : List[str] = model(__a, __a)
_lowerCAmelCase : List[Any] = scheduler.step(__a, __a, __a).prev_sample
for i, t in enumerate(scheduler.timesteps):
_lowerCAmelCase : Union[str, Any] = model(__a, __a)
_lowerCAmelCase : Union[str, Any] = scheduler.step(__a, __a, __a).prev_sample
return sample
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs)
_lowerCAmelCase : str = kwargs.pop("num_inference_steps", __a)
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Dict = scheduler_class(**__a)
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(__a, "set_timesteps"):
scheduler.set_timesteps(__a)
elif num_inference_steps is not None and not hasattr(__a, "set_timesteps"):
_lowerCAmelCase : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_lowerCAmelCase : Optional[int] = dummy_past_residuals[:]
_lowerCAmelCase : str = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : Union[str, Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : Union[str, Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
self.assertEqual(output_a.shape, sample.shape)
self.assertEqual(output_a.shape, output_a.shape)
_lowerCAmelCase : Union[str, Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
_lowerCAmelCase : Optional[Any] = scheduler.step(__a, __a, __a, **__a).prev_sample
self.assertEqual(output_a.shape, sample.shape)
self.assertEqual(output_a.shape, output_a.shape)
def snake_case__ ( self):
'''simple docstring'''
for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
def snake_case__ ( self):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.full_loop()
_lowerCAmelCase : Any = torch.mean(torch.abs(__a))
assert abs(result_mean.item() - 254_0529) < 10
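# --- Illustrative sketch (assumption: `tmpdir` is a writable placeholder path):
# the save/load round-trip exercised above, using the public scheduler API.
def _example_scheduler_roundtrip(tmpdir="./ipndm_config"):
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.save_config(tmpdir)
    restored = IPNDMScheduler.from_pretrained(tmpdir)
    restored.set_timesteps(10)
    return restored.timesteps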
| 701 |
from __future__ import annotations
def mean(nums: list) -> float:
    """Find the mean of a list of numbers.
    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    """
    if not nums:
        raise ValueError("List is empty" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    '''simple docstring'''
    config = FunnelConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
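# --- Illustrative invocation sketch (assumption: the script name and file paths
# below are placeholders for a real Funnel TF checkpoint and config):
# python convert_funnel_checkpoint.py --tf_checkpoint_path ./model.ckpt \
#     --config_file ./config.json --pytorch_dump_path ./pytorch_model.bin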
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
_snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 702 |
def hexagonal_numbers(length: int) -> list[int]:
    '''simple docstring'''
    if length <= 0 or not isinstance(length , int ):
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(length )]
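# Illustrative check: the n-th hexagonal number is h(n) = n * (2n - 1), so
# hexagonal_numbers(5) returns [0, 1, 6, 15, 28].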
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 0 |
from __future__ import annotations
def peak(lst: list[int]) -> int:
    '''simple docstring'''
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
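# --- Illustrative usage (assumption: the input rises and then falls, the
# precondition this divide-and-conquer search needs):
# peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) returns 5 in O(log n) comparisons.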
| 703 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    '''simple docstring'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray , dimensions: int ) -> np.ndarray:
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _ , eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , centered_data )
        logging.info("Principal Component Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def linear_discriminant_analysis(features: np.ndarray , labels: np.ndarray , classes: int , dimensions: int ) -> np.ndarray:
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any:
        _ , eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix , _ , _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
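# --- Illustrative usage sketch (assumption): rows are features and columns are
# samples, the (features x samples) layout the functions above expect.
def _example_pca():
    features = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [3.0, 6.0, 9.0]])
    return principal_component_analysis(features, 2)  # projected data, shape (2, 3)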
| 658 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class UpperCAmelCase_ :
    def __init__( self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : int = max_length
_lowerCAmelCase : str = num_mel_bins
_lowerCAmelCase : Union[str, Any] = is_training
_lowerCAmelCase : List[str] = use_labels
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : List[Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = type_sequence_label_size
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = scope
_lowerCAmelCase : Any = frequency_stride
_lowerCAmelCase : Optional[Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCAmelCase : str = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_lowerCAmelCase : Optional[Any] = (self.max_length - self.patch_size) // self.time_stride + 1
_lowerCAmelCase : List[str] = frequency_out_dimension * time_out_dimension
_lowerCAmelCase : Any = num_patches + 2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
_lowerCAmelCase : Dict = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Any = self.get_config()
return config, input_values, labels
def snake_case__ ( self):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__a, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )
    def snake_case__ ( self, config, input_values, labels):
        '''simple docstring'''
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_values , labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
lowerCamelCase__ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
    def snake_case__ ( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = ASTModelTester(self)
_lowerCAmelCase : List[Any] = ConfigTester(self, config_class=__a, has_text_modality=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(__a)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
_lowerCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a, nn.Linear))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = model_class(__a)
_lowerCAmelCase : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : int = ["input_values"]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : str = ASTModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def prepare_audio():
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
if is_torchaudio_available()
else None
)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.default_feature_extractor
_lowerCAmelCase : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(__a)
_lowerCAmelCase : Any = self.default_feature_extractor
_lowerCAmelCase : Dict = prepare_audio()
_lowerCAmelCase : str = audio.squeeze().numpy()
_lowerCAmelCase : str = feature_extractor(__a, sampling_rate=__a, return_tensors="pt").to(__a)
# forward pass
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**__a)
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 527))
self.assertEqual(outputs.logits.shape, __a)
_lowerCAmelCase : List[Any] = torch.tensor([-0.8_760, -7.0_042, -8.6_602]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3], __a, atol=1E-4))
| 704 |
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , "html.parser" )
    div = soup.find("div" , attrs={"class": "gs_ri"} )
    anchors = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
    return anchors[2].get_text()
if __name__ == "__main__":
params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 0 |
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( FeatureExtractionMixin):
def __init__( self, **__a):
'''simple docstring'''
requires_backends(self, ["bs4"])
super().__init__(**__a)
def snake_case__ ( self, __a):
'''simple docstring'''
        xpath_tags = []
        xpath_subscripts = []
        child = __a if __a.name else __a.parent
        for parent in child.parents: # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeautifulSoup(__a, "html.parser")
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : List[Any] = []
for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
continue
_lowerCAmelCase : Dict = html.unescape(__a).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__a)
_lowerCAmelCase : List[str] = self.xpath_soup(__a)
stringaxtag_seq.append(__a)
stringaxsubs_seq.append(__a)
if len(__a) != len(__a):
raise ValueError("Number of doc strings and xtags does not correspond")
if len(__a) != len(__a):
raise ValueError("Number of doc strings and xsubs does not correspond")
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
    def snake_case__ ( self, xpath_tags, xpath_subscripts):
        '''simple docstring'''
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Tuple = False
# Check that strings has a valid type
if isinstance(__a, __a):
_lowerCAmelCase : Dict = True
elif isinstance(__a, (list, tuple)):
if len(__a) == 0 or isinstance(html_strings[0], __a):
_lowerCAmelCase : str = True
if not valid_strings:
raise ValueError(
"HTML strings must of type `str`, `List[str]` (batch of examples), "
f"but is of type {type(__a)}.")
_lowerCAmelCase : Optional[Any] = bool(isinstance(__a, (list, tuple)) and (isinstance(html_strings[0], __a)))
if not is_batched:
_lowerCAmelCase : Tuple = [html_strings]
# Get nodes + xpaths
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Union[str, Any] = []
for html_string in html_strings:
_lowerCAmelCase : Optional[Any] = self.get_three_from_single(__a)
nodes.append(__a)
_lowerCAmelCase : int = []
for node, tag_list, sub_list in zip(__a, __a, __a):
_lowerCAmelCase : Optional[Any] = self.construct_xpath(__a, __a)
xpath_strings.append(__a)
xpaths.append(__a)
# return as Dict
_lowerCAmelCase : int = {"nodes": nodes, "xpaths": xpaths}
_lowerCAmelCase : Optional[Any] = BatchFeature(data=__a, tensor_type=__a)
return encoded_inputs
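# --- Illustrative usage sketch: extracting nodes and xpaths from a small HTML
# snippet with the feature extractor above (requires the bs4 backend).
def _example_markup_extraction():
    extractor = UpperCAmelCase_()
    encoding = extractor("<html><body><p>Hello world</p></body></html>")
    return encoding["nodes"], encoding["xpaths"]  # [["Hello world"]], [["/html/body/p"]]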
| 705 |
def solution(n: int = 1_000_000 ) -> int:
    '''simple docstring'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2 , n ):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
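# Illustrative check: the Collatz chain for 13 is 13 -> 40 -> 20 -> 10 -> 5 ->
# 16 -> 8 -> 4 -> 2 -> 1 (ten terms), so the memoization dictionary above ends
# up storing counters[13] == 10.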
if __name__ == "__main__":
print(solution(int(input().strip())))
| 658 | 0 |
'''simple docstring'''
def jaccard_similarity(set_a , set_b , alternative_union=False ):
    '''simple docstring'''
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
_snake_case = {"a", "b", "c", "d", "e"}
_snake_case = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 706 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    '''simple docstring'''
    if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.bias" , ".conv1d_1.bias" )
    elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.weight" , ".conv1d_1.weight" )
    elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.bias" , ".conv1d_2.bias" )
    elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.weight" , ".conv1d_2.weight" )
    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
    if "prime_prior" in key:
        key = key.replace("prime_prior" , "encoder" )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb." , "." )
    if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k" , ".codebook" )
    if "y_emb." in key:
        return key.replace("y_emb." , "metadata_embedding." )
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb" , "embed_tokens" )
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
    if ".ln" in key:
        return key.replace(".ln" , ".layer_norm" )
    if "_ln" in key:
        return key.replace("_ln" , "_layer_norm" )
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj" , "encoder.proj_in" )
    if "prime_x_out" in key:
        return key.replace("prime_x_out" , "encoder.lm_head" )
    if "prior.x_out" in key:
        return key.replace("x_out" , "fc_proj_out" )
    if "x_emb" in key:
        return key.replace("x_emb" , "embed_tokens" )
    return key
def fix_jukebox_keys(state_dict , model_state_dict , key_prefix , mapping ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle missmatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None , pytorch_dump_folder_path=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 0 |
from __future__ import annotations
_snake_case = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_snake_case = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def A ( _lowerCamelCase ):
'''simple docstring'''
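    # O(n^2) baseline: for each element, scan right for the first strictly larger value (-1 if none).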
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Tuple = len(_lowerCamelCase )
for i in range(_lowerCamelCase ):
_lowerCAmelCase : float = -1
for j in range(i + 1 , _lowerCamelCase ):
if arr[i] < arr[j]:
_lowerCAmelCase : Union[str, Any] = arr[j]
break
result.append(_lowerCamelCase )
return result
def A ( _lowerCamelCase ):
'''simple docstring'''
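    # Same O(n^2) scan as above, written with enumerate and slicing instead of index arithmetic.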
_lowerCAmelCase : List[str] = []
for i, outer in enumerate(_lowerCamelCase ):
_lowerCAmelCase : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_lowerCAmelCase : str = inner
break
result.append(_lowerCamelCase )
return result
def A ( _lowerCamelCase ):
'''simple docstring'''
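    # O(n) version: scan right-to-left with a monotonic stack; pop values <= current,
    # and the surviving top of the stack (if any) is the next greater element.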
_lowerCAmelCase : List[str] = len(_lowerCamelCase )
_lowerCAmelCase : list[float] = []
_lowerCAmelCase : list[float] = [-1] * arr_size
for index in reversed(range(_lowerCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_lowerCAmelCase : List[str] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_snake_case = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
| 707 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
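    # Judging by the body, the positional parameters are (ksize, sigma, theta, lambd, gamma, psi):
    # the standard Gabor kernel g = exp(-(x'^2 + gamma^2*y'^2) / (2*sigma^2)) * cos(2*pi*x'/lambd + psi),
    # with theta given in degrees.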
if (ksize % 2) == 0:
_lowerCAmelCase : str = ksize + 1
    _lowerCAmelCase : List[str] = np.zeros((ksize, ksize) , dtype=np.float32 )
    # compute each kernel value
for y in range(_lowerCamelCase ):
for x in range(_lowerCamelCase ):
# distance from center
_lowerCAmelCase : int = x - ksize // 2
_lowerCAmelCase : Dict = y - ksize // 2
            # degrees to radians
_lowerCAmelCase : List[Any] = theta / 180 * np.pi
_lowerCAmelCase : int = np.cos(_theta )
_lowerCAmelCase : Optional[int] = np.sin(_theta )
# get kernel x
_lowerCAmelCase : int = cos_theta * px + sin_theta * py
# get kernel y
_lowerCAmelCase : str = -sin_theta * px + cos_theta * py
# fill kernel
_lowerCAmelCase : Union[str, Any] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread("../image_data/lena.jpg")
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
    out += filter2D(gray, CV_8UC3, kernel_10)
_snake_case = out / out.max() * 255
_snake_case = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 658 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
| 708 |
def A ( _lowerCamelCase ):
'''simple docstring'''
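    # Binary insertion sort: binary-search the sorted prefix for the insertion index,
    # then shift the intervening elements one slot to the right.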
_lowerCAmelCase : int = len(_lowerCamelCase )
for i in range(1 , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = collection[i]
_lowerCAmelCase : str = 0
_lowerCAmelCase : Union[str, Any] = i - 1
while low <= high:
_lowerCAmelCase : List[str] = (low + high) // 2
if val < collection[mid]:
_lowerCAmelCase : Optional[int] = mid - 1
else:
_lowerCAmelCase : List[str] = mid + 1
for j in range(_lowerCamelCase , _lowerCamelCase , -1 ):
_lowerCAmelCase : int = collection[j - 1]
_lowerCAmelCase : Optional[int] = val
return collection
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 658 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
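# Human-readable language names mapped to the FLORES-200 codes expected by the NLLB tokenizer.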
_snake_case = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'facebook/nllb-200-distilled-600M'
lowerCamelCase__ = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
    'which should be the desired output language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
lowerCamelCase__ = 'translator'
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = LANGUAGE_CODES
lowerCamelCase__ = ['text', 'text', 'text']
lowerCamelCase__ = ['text']
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"{src_lang} is not a supported language.")
if tgt_lang not in self.lang_to_code:
raise ValueError(f"{tgt_lang} is not a supported language.")
_lowerCAmelCase : str = self.lang_to_code[src_lang]
_lowerCAmelCase : Optional[int] = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__a, return_tensors="pt", src_lang=__a, tgt_lang=__a)
def snake_case__ ( self, __a):
'''simple docstring'''
return self.model.generate(**__a)
def snake_case__ ( self, __a):
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=__a)
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
| 658 | 0 |
_snake_case = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_snake_case = ["a", "b", "c", "d", "e"]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
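    # DFS post-order: a vertex is appended only after all of its descendants,
    # so the returned list is a reverse topological order.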
_lowerCAmelCase : Tuple = start
# add current to visited
visited.append(_lowerCamelCase )
_lowerCAmelCase : str = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
_lowerCAmelCase : Optional[int] = topological_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
    # once all neighbors are visited, add current to sort
    sort.append(_lowerCamelCase )
    # if not every vertex has been visited yet, pick an unvisited one and continue
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
for vertice in vertices:
if vertice not in visited:
_lowerCAmelCase : Tuple = topological_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# return sort
return sort
if __name__ == "__main__":
_snake_case = topological_sort("a", [], [])
print(sort)
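    # Reversing the post-order yields a forward topological order; for the sample graph
    # above, list(reversed(sort)) == ["a", "b", "e", "d", "c"].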
| 710 |
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
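    # Plain recursion: try every item at every step; exponential time, fine only for tiny targets.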
def count_of_possible_combinations(_lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
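    # Top-down DP: dp_array caches the count for each remaining target, giving O(n * target) time.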
def count_of_possible_combinations_with_dp_array(
_lowerCamelCase , _lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase : Optional[int] = sum(
count_of_possible_combinations_with_dp_array(target - item , _lowerCamelCase )
for item in array )
_lowerCAmelCase : Any = answer
return answer
_lowerCAmelCase : List[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
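    # Bottom-up DP: dp_array[i] counts the ordered sequences of items that sum to i.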
_lowerCAmelCase : List[Any] = [0] * (target + 1)
_lowerCAmelCase : List[str] = 1
for i in range(1 , target + 1 ):
for j in range(_lowerCamelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
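    # With array [1, 2, 5] and target 5 there are 9 ordered combinations.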
print(combination_sum_iv(n, array, target))
| 658 | 0 |
from math import ceil
def A ( _lowerCamelCase = 1_001 ):
'''simple docstring'''
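    # Project Euler 28: ring i of the n x n spiral has corners (2i+1)^2, (2i+1)^2 - 2i,
    # (2i+1)^2 - 4i and (2i+1)^2 - 6i, which sum to 4*odd**2 - 6*even with odd = 2i+1, even = 2i.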
_lowerCAmelCase : Optional[Any] = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
_lowerCAmelCase : Dict = 2 * i + 1
_lowerCAmelCase : Optional[int] = 2 * i
_lowerCAmelCase : List[Any] = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
_snake_case = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 711 |
import string
def A ( _lowerCamelCase ):
'''simple docstring'''
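    # Brute force: try all 26 Caesar shifts and print every candidate plaintext.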
for key in range(len(string.ascii_uppercase ) ):
_lowerCAmelCase : str = ""
for symbol in message:
if symbol in string.ascii_uppercase:
_lowerCAmelCase : List[str] = string.ascii_uppercase.find(_lowerCamelCase )
_lowerCAmelCase : Dict = num - key
if num < 0:
_lowerCAmelCase : Dict = num + len(string.ascii_uppercase )
_lowerCAmelCase : Optional[Any] = translated + string.ascii_uppercase[num]
else:
_lowerCAmelCase : int = translated + symbol
print(F"Decryption using Key #{key}: {translated}" )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = input("Encrypted message: " )
_lowerCAmelCase : Dict = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 658 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
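# Embeds the conditioning image (e.g. a canny edge map) with strided convolutions until it
# matches the latent resolution, so it can be added onto the noisy sample.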
class UpperCAmelCase_ ( nn.Module):
lowerCamelCase__ = 42
lowerCamelCase__ = (16, 32, 96, 256)
    lowerCamelCase__ = jnp.float32
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = nn.Conv(
self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
_lowerCAmelCase : List[Any] = []
for i in range(len(self.block_out_channels) - 1):
_lowerCAmelCase : List[Any] = self.block_out_channels[i]
_lowerCAmelCase : Dict = self.block_out_channels[i + 1]
_lowerCAmelCase : Any = nn.Conv(
__a, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(__a)
_lowerCAmelCase : Any = nn.Conv(
__a, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(__a)
_lowerCAmelCase : Tuple = blocks
_lowerCAmelCase : Union[str, Any] = nn.Conv(
self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.conv_in(__a)
_lowerCAmelCase : str = nn.silu(__a)
for block in self.blocks:
_lowerCAmelCase : str = block(__a)
_lowerCAmelCase : Dict = nn.silu(__a)
_lowerCAmelCase : str = self.conv_out(__a)
return embedding
@flax_register_to_config
class UpperCAmelCase_ ( nn.Module , a , a):
lowerCamelCase__ = 32
lowerCamelCase__ = 4
lowerCamelCase__ = (
'CrossAttnDownBlock2D',
'CrossAttnDownBlock2D',
'CrossAttnDownBlock2D',
'DownBlock2D',
)
lowerCamelCase__ = False
lowerCamelCase__ = (320, 640, 1280, 1280)
lowerCamelCase__ = 2
lowerCamelCase__ = 8
lowerCamelCase__ = None
lowerCamelCase__ = 1280
lowerCamelCase__ = 0.0
lowerCamelCase__ = False
    lowerCamelCase__ = jnp.float32
lowerCamelCase__ = True
lowerCamelCase__ = 0
lowerCamelCase__ = 'rgb'
lowerCamelCase__ = (16, 32, 96, 256)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = (1, self.in_channels, self.sample_size, self.sample_size)
        _lowerCAmelCase : Dict = jnp.zeros(__a, dtype=jnp.float32)
        _lowerCAmelCase : Any = jnp.ones((1,), dtype=jnp.int32)
        _lowerCAmelCase : int = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        _lowerCAmelCase : List[Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
        _lowerCAmelCase : List[str] = jnp.zeros(__a, dtype=jnp.float32)
_lowerCAmelCase : Union[str, Any] = jax.random.split(__a)
_lowerCAmelCase : List[str] = {"params": params_rng, "dropout": dropout_rng}
return self.init(__a, __a, __a, __a, __a)["params"]
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.block_out_channels
_lowerCAmelCase : Any = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_lowerCAmelCase : Union[str, Any] = self.num_attention_heads or self.attention_head_dim
# input
_lowerCAmelCase : Dict = nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
_lowerCAmelCase : List[str] = FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
_lowerCAmelCase : Tuple = FlaxTimestepEmbedding(__a, dtype=self.dtype)
_lowerCAmelCase : List[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
_lowerCAmelCase : List[str] = self.only_cross_attention
if isinstance(__a, __a):
_lowerCAmelCase : List[str] = (only_cross_attention,) * len(self.down_block_types)
if isinstance(__a, __a):
_lowerCAmelCase : Any = (num_attention_heads,) * len(self.down_block_types)
# down
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Any = block_out_channels[0]
_lowerCAmelCase : Any = nn.Conv(
__a, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(__a)
for i, down_block_type in enumerate(self.down_block_types):
_lowerCAmelCase : Optional[Any] = output_channel
_lowerCAmelCase : Optional[int] = block_out_channels[i]
_lowerCAmelCase : List[Any] = i == len(__a) - 1
if down_block_type == "CrossAttnDownBlock2D":
                _lowerCAmelCase : Dict = FlaxCrossAttnDownBlock2D(
in_channels=__a, out_channels=__a, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
else:
                _lowerCAmelCase : Optional[Any] = FlaxDownBlock2D(
in_channels=__a, out_channels=__a, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(__a)
for _ in range(self.layers_per_block):
_lowerCAmelCase : str = nn.Conv(
__a, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(__a)
if not is_final_block:
_lowerCAmelCase : int = nn.Conv(
__a, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(__a)
_lowerCAmelCase : Optional[int] = down_blocks
_lowerCAmelCase : int = controlnet_down_blocks
# mid
_lowerCAmelCase : Dict = block_out_channels[-1]
        _lowerCAmelCase : Dict = FlaxUNetMidBlock2DCrossAttn(
in_channels=__a, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
_lowerCAmelCase : str = nn.Conv(
__a, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self, __a, __a, __a, __a, __a = 1.0, __a = True, __a = False, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_lowerCAmelCase : List[str] = jnp.flip(__a, axis=1)
# 1. time
if not isinstance(__a, jnp.ndarray):
            _lowerCAmelCase : List[str] = jnp.array([timesteps], dtype=jnp.int32)
elif isinstance(__a, jnp.ndarray) and len(timesteps.shape) == 0:
            _lowerCAmelCase : Optional[int] = timesteps.astype(dtype=jnp.float32)
_lowerCAmelCase : int = jnp.expand_dims(__a, 0)
_lowerCAmelCase : Union[str, Any] = self.time_proj(__a)
_lowerCAmelCase : Optional[int] = self.time_embedding(__a)
# 2. pre-process
_lowerCAmelCase : List[str] = jnp.transpose(__a, (0, 2, 3, 1))
_lowerCAmelCase : int = self.conv_in(__a)
_lowerCAmelCase : Optional[Any] = jnp.transpose(__a, (0, 2, 3, 1))
_lowerCAmelCase : Tuple = self.controlnet_cond_embedding(__a)
sample += controlnet_cond
# 3. down
_lowerCAmelCase : Any = (sample,)
for down_block in self.down_blocks:
if isinstance(__a, __a):
_lowerCAmelCase : Tuple = down_block(__a, __a, __a, deterministic=not train)
else:
_lowerCAmelCase : Optional[int] = down_block(__a, __a, deterministic=not train)
down_block_res_samples += res_samples
# 4. mid
_lowerCAmelCase : Any = self.mid_block(__a, __a, __a, deterministic=not train)
        # 5. controlnet blocks
_lowerCAmelCase : str = ()
for down_block_res_sample, controlnet_block in zip(__a, self.controlnet_down_blocks):
_lowerCAmelCase : str = controlnet_block(__a)
controlnet_down_block_res_samples += (down_block_res_sample,)
_lowerCAmelCase : int = controlnet_down_block_res_samples
_lowerCAmelCase : List[Any] = self.controlnet_mid_block(__a)
# 6. scaling
_lowerCAmelCase : List[str] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__a, mid_block_res_sample=__a)
| 712 |
import requests
from bs4 import BeautifulSoup
def A ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
_lowerCAmelCase : str = soup.findAll("h1" )
_lowerCAmelCase : Optional[int] = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 0 |
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
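    # tax_rate is a fraction of the price, so 0.25 means a 25% tax.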
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 713 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
def __init__( self, __a, __a):
'''simple docstring'''
if len(__a) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1.")
_lowerCAmelCase : list[float] = list(__a)
_lowerCAmelCase : Any = degree
def __add__( self, __a):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_lowerCAmelCase : Dict = self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree, __a)
else:
_lowerCAmelCase : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree, __a)
def __sub__( self, __a):
'''simple docstring'''
return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self):
'''simple docstring'''
return Polynomial(self.degree, [-c for c in self.coefficients])
def __mul__( self, __a):
'''simple docstring'''
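        # Polynomial product = discrete convolution: the x^(i+j) coefficient accumulates
        # the products of the x^i and x^j coefficients.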
_lowerCAmelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(__a)
return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
def snake_case__ ( self):
'''simple docstring'''
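        # Power rule: the x^i term with coefficient c contributes i*c to the new x^(i-1) coefficient.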
_lowerCAmelCase : list[float] = [0] * self.degree
for i in range(self.degree):
_lowerCAmelCase : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, __a)
def snake_case__ ( self, __a = 0):
'''simple docstring'''
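        # Reverse power rule: each coefficient c of x^i becomes c/(i+1) on x^(i+1);
        # the constant of integration fills slot 0.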
_lowerCAmelCase : list[float] = [0] * (self.degree + 2)
_lowerCAmelCase : Optional[Any] = constant
for i in range(self.degree + 1):
_lowerCAmelCase : Dict = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, __a)
def __eq__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self, __a):
'''simple docstring'''
return not self.__eq__(__a)
| 658 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = os.path.join(args.tf_model_dir , "parameters.json" )
_lowerCAmelCase : Optional[Any] = json.loads(open(_lowerCamelCase ).read() )
if not params:
raise ValueError(
F"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." )
if not args.output.endswith(".pt" ):
_lowerCAmelCase : Dict = args.output + ".pt"
_lowerCAmelCase : Any = OrderedDict()
with tf.device("/CPU:0" ):
_lowerCAmelCase : List[Any] = tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase : Dict = reader.get_variable_to_shape_map()
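        # Walk every tensor in the TF checkpoint and translate its Mesh-TensorFlow name
        # into the matching PyTorch state-dict key.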
for key_name in shapes.keys():
            _lowerCAmelCase : Tuple = reader.get_tensor(_lowerCamelCase ).astype(np.float16 )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_lowerCAmelCase : Any = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_lowerCAmelCase : Union[str, Any] = 8
_lowerCAmelCase : Union[str, Any] = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : Tuple = torch.tensor(_lowerCamelCase )
elif key_name.startswith("model/moe" ):
_lowerCAmelCase : Optional[int] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_lowerCAmelCase : List[str] = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_lowerCAmelCase : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : Optional[int] = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/softmlp/kernel" ):
_lowerCAmelCase : int = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_lowerCAmelCase : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : List[Any] = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_lowerCAmelCase : List[Any] = key_name[-9:-7]
for i in range(16 ):
_lowerCAmelCase : Tuple = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_lowerCAmelCase : Any = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase )
elif key_name.startswith("model/mlp" ):
_lowerCAmelCase : List[Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_lowerCAmelCase : Optional[int] = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_lowerCAmelCase : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : List[Any] = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/p1/bias" ):
_lowerCAmelCase : str = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_lowerCAmelCase : List[Any] = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : int = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/p2/kernel" ):
_lowerCAmelCase : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_lowerCAmelCase : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/p2/bias" ):
_lowerCAmelCase : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_lowerCAmelCase : List[str] = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : Dict = torch.tensor(_lowerCamelCase )
elif key_name.startswith("model/ln" ):
_lowerCAmelCase : Tuple = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_lowerCAmelCase : Dict = "model.blocks.%d.feed_forward.norm.bias" % player
_lowerCAmelCase : Dict = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : Union[str, Any] = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/g" ):
_lowerCAmelCase : str = "model.blocks.%d.feed_forward.norm.weight" % player
_lowerCAmelCase : Any = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : str = torch.tensor(_lowerCamelCase )
elif key_name.startswith("model/att" ):
_lowerCAmelCase : Union[str, Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_lowerCAmelCase : Any = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase : Tuple = state[:, 0, :, :]
_lowerCAmelCase : int = state[:, 1, :, :]
_lowerCAmelCase : List[str] = state[:, 2, :, :]
_lowerCAmelCase : Dict = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : Any = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : Optional[Any] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : str = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase )
_lowerCAmelCase : Any = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_lowerCAmelCase : Optional[Any] = torch.tensor(_lowerCamelCase )
_lowerCAmelCase : Tuple = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/o/kernel" ):
_lowerCAmelCase : str = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_lowerCAmelCase : Dict = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : int = torch.tensor(_lowerCamelCase )
elif key_name.startswith("model/an" ):
_lowerCAmelCase : Union[str, Any] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_lowerCAmelCase : Optional[int] = "model.blocks.%d.self_attn.norm.bias" % player
_lowerCAmelCase : Optional[Any] = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : Union[str, Any] = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/g" ):
_lowerCAmelCase : Tuple = "model.blocks.%d.self_attn.norm.weight" % player
_lowerCAmelCase : Dict = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : int = torch.tensor(_lowerCamelCase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_lowerCAmelCase : Tuple = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_lowerCAmelCase : Union[str, Any] = "model.%s.weight" % nlayer
_lowerCAmelCase : Union[str, Any] = vnp.copy() # same in embedded
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase )
if key_name.startswith("model/wte" ):
_lowerCAmelCase : Any = "lm_head.weight"
_lowerCAmelCase : str = vnp.copy() # same in embedded
_lowerCAmelCase : Optional[int] = torch.tensor(_lowerCamelCase )
elif key_name.startswith("model/wob" ):
_lowerCAmelCase : Optional[int] = "final_logits_bias"
_lowerCAmelCase : Dict = vnp.copy() # same in embedded
_lowerCAmelCase : List[str] = state.reshape((1, -1) )
_lowerCAmelCase : Union[str, Any] = torch.tensor(_lowerCamelCase )
elif key_name == "model/dense/kernel":
_lowerCAmelCase : Union[str, Any] = "model.last_project.weight"
_lowerCAmelCase : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase : Optional[Any] = "model.last_project.bias"
_lowerCAmelCase : Optional[int] = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : Dict = torch.tensor(_lowerCamelCase )
torch.save(_lowerCamelCase , args.output )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
_snake_case = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 714 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=3_2000, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=True, __a="bi", __a=0.02, __a=1E-12, __a=0.1, __a=512, __a=None, __a=True, __a=False, __a=False, __a=-1, __a=False, __a="last", __a=True, __a="tanh", __a=0.1, __a=5, __a=5, __a=5, __a=1, __a=2, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Tuple = n_layer
_lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
_lowerCAmelCase : Optional[int] = d_model // n_head
_lowerCAmelCase : List[str] = ff_activation
_lowerCAmelCase : Tuple = d_inner
_lowerCAmelCase : List[Any] = untie_r
_lowerCAmelCase : List[str] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = dropout
_lowerCAmelCase : Optional[int] = mem_len
_lowerCAmelCase : Union[str, Any] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : List[str] = clamp_len
_lowerCAmelCase : Any = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : Optional[Any] = summary_activation
_lowerCAmelCase : Tuple = summary_last_dropout
_lowerCAmelCase : Union[str, Any] = start_n_top
_lowerCAmelCase : Optional[int] = end_n_top
_lowerCAmelCase : Tuple = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def snake_case__ ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
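# Number of output labels for each GLUE task, used to size the classification head.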
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = XLNetConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : Any = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : Any = finetuning_task
_lowerCAmelCase : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Union[str, Any] = XLNetForSequenceClassification(_lowerCamelCase )
elif "squad" in finetuning_task:
_lowerCAmelCase : Union[str, Any] = finetuning_task
_lowerCAmelCase : Any = XLNetForQuestionAnswering(_lowerCamelCase )
else:
_lowerCAmelCase : Union[str, Any] = XLNetLMHeadModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(F"Save PyTorch model to {os.path.abspath(_lowerCamelCase )}" )
torch.save(model.state_dict() , _lowerCamelCase )
print(F"Save configuration file to {os.path.abspath(_lowerCamelCase )}" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 715 |
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'swin2sr'
lowerCamelCase__ = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self, __a=64, __a=1, __a=3, __a=180, __a=[6, 6, 6, 6, 6, 6], __a=[6, 6, 6, 6, 6, 6], __a=8, __a=2.0, __a=True, __a=0.0, __a=0.0, __a=0.1, __a="gelu", __a=False, __a=0.02, __a=1E-5, __a=2, __a=1.0, __a="1conv", __a="pixelshuffle", **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : Tuple = num_channels
_lowerCAmelCase : Dict = embed_dim
_lowerCAmelCase : Dict = depths
_lowerCAmelCase : Optional[int] = len(__a)
_lowerCAmelCase : Tuple = num_heads
_lowerCAmelCase : int = window_size
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Tuple = qkv_bias
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Optional[Any] = use_absolute_embeddings
_lowerCAmelCase : str = layer_norm_eps
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Dict = upscale
_lowerCAmelCase : Optional[int] = img_range
_lowerCAmelCase : str = resi_connection
_lowerCAmelCase : Optional[int] = upsampler
| 716 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 658 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'bloom'
lowerCamelCase__ = ['past_key_values']
lowerCamelCase__ = {
'num_hidden_layers': 'n_layer',
'num_attention_heads': 'n_head',
}
def __init__( self, __a=25_0880, __a=64, __a=2, __a=8, __a=1E-5, __a=0.02, __a=True, __a=1, __a=2, __a=False, __a=0.0, __a=0.0, __a=1, __a=False, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Dict = vocab_size
# Backward compatibility with n_embed kwarg
_lowerCAmelCase : Tuple = kwargs.pop("n_embed", __a)
_lowerCAmelCase : List[str] = hidden_size if n_embed is None else n_embed
_lowerCAmelCase : Any = n_layer
_lowerCAmelCase : Dict = n_head
_lowerCAmelCase : List[str] = layer_norm_epsilon
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Optional[int] = use_cache
_lowerCAmelCase : Tuple = pretraining_tp
_lowerCAmelCase : Optional[Any] = apply_residual_connection_post_layernorm
_lowerCAmelCase : Optional[int] = hidden_dropout
_lowerCAmelCase : str = attention_dropout
_lowerCAmelCase : Optional[int] = bos_token_id
_lowerCAmelCase : str = eos_token_id
_lowerCAmelCase : int = slow_but_exact
super().__init__(bos_token_id=__a, eos_token_id=__a, **__a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.12')
def __init__( self, __a, __a = "default", __a = None, __a = False, ):
'''simple docstring'''
super().__init__(__a, task=__a, patching_specs=__a, use_past=__a)
if not getattr(self._config, "pad_token_id", __a):
# TODO: how to do that better?
_lowerCAmelCase : str = 0
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(__a, direction="inputs", inverted_values_shape=__a)
_lowerCAmelCase : Optional[int] = {0: "batch", 1: "past_sequence + sequence"}
else:
_lowerCAmelCase : List[str] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def snake_case__ ( self):
'''simple docstring'''
return self._config.n_layer
@property
def snake_case__ ( self):
'''simple docstring'''
return self._config.n_head
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-3
def snake_case__ ( self, __a, __a = -1, __a = -1, __a = False, __a = None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = super(__a, self).generate_dummy_inputs(
__a, batch_size=__a, seq_length=__a, is_pair=__a, framework=__a)
# We need to order the input in the way they appears in the forward()
_lowerCAmelCase : List[Any] = OrderedDict({"input_ids": common_inputs["input_ids"]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
_lowerCAmelCase : Dict = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Union[str, Any] = seqlen + 2
_lowerCAmelCase : Any = self._config.hidden_size // self.num_attention_heads
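                # BLOOM fuses the batch and head dimensions in its cache: keys are
                # (batch * n_head, head_dim, seq_len), values are (batch * n_head, seq_len, head_dim).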
_lowerCAmelCase : Tuple = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
_lowerCAmelCase : str = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
_lowerCAmelCase : Optional[Any] = [
(torch.zeros(__a), torch.zeros(__a)) for _ in range(self.num_layers)
]
_lowerCAmelCase : Dict = common_inputs["attention_mask"]
if self.use_past:
_lowerCAmelCase : List[str] = ordered_inputs["attention_mask"].dtype
_lowerCAmelCase : Optional[Any] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__a, __a, dtype=__a)], dim=1)
return ordered_inputs
@property
def snake_case__ ( self):
'''simple docstring'''
return 13
| 717 |
import base64
def A ( _lowerCamelCase ):
    '''simple docstring'''
    return base64.a85encode(_lowerCamelCase.encode("utf-8" ) )
def A ( _lowerCamelCase ):
    '''simple docstring'''
    return base64.a85decode(_lowerCamelCase ).decode("utf-8" )
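# For reference: base64.a85encode(b"Hello") == b"87cURDZ", and a85decode reverses it.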
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
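# The two classes below are minimal stand-ins with forward-like methods; presumably only
# their call signatures matter to the export utilities under test, not any real computation.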
class UpperCAmelCase_ :
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
return None
class UpperCAmelCase_ :
def snake_case__ ( self, __a, __a, __a, __a):
'''simple docstring'''
return None
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case__ ( self):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a, "tf", 12, **__a)
@require_torch
@slow
def snake_case__ ( self):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a, "pt", 12, **__a)
@require_torch
@slow
def snake_case__ ( self):
'''simple docstring'''
from transformers import BertModel
_lowerCAmelCase : List[str] = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t") as vocab_file:
vocab_file.write("\n".join(__a))
vocab_file.flush()
_lowerCAmelCase : Optional[Any] = BertTokenizerFast(vocab_file.name)
with TemporaryDirectory() as bert_save_dir:
_lowerCAmelCase : Any = BertModel(BertConfig(vocab_size=len(__a)))
model.save_pretrained(__a)
self._test_export(__a, "pt", 12, __a)
@require_tf
@slow
def snake_case__ ( self):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowerCAmelCase : Tuple = self._test_export(__a, "tf", 12, **__a)
_lowerCAmelCase : int = quantize(Path(__a))
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model")
@require_torch
@slow
def snake_case__ ( self):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_lowerCAmelCase : str = self._test_export(__a, "pt", 12, **__a)
_lowerCAmelCase : int = quantize(__a)
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__a).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model")
def snake_case__ ( self, __a, __a, __a, __a=None, **__a):
'''simple docstring'''
try:
# Compute path
with TemporaryDirectory() as tempdir:
_lowerCAmelCase : Optional[Any] = Path(__a).joinpath("model.onnx")
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__a, __a, __a, __a, __a, **__a)
return path
except Exception as e:
self.fail(__a)
@require_torch
@require_tokenizers
@slow
def snake_case__ ( self):
'''simple docstring'''
from transformers import BertModel
_lowerCAmelCase : Dict = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
_lowerCAmelCase : Any = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
self._test_infer_dynamic_axis(__a, __a, "pt")
@require_tf
@require_tokenizers
@slow
def snake_case__ ( self):
'''simple docstring'''
from transformers import TFBertModel
_lowerCAmelCase : int = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
_lowerCAmelCase : Optional[Any] = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
self._test_infer_dynamic_axis(__a, __a, "tf")
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = FeatureExtractionPipeline(__a, __a)
_lowerCAmelCase : Any = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
_lowerCAmelCase : Optional[int] = infer_shapes(__a, __a)
# Assert all variables are present
self.assertEqual(len(__a), len(__a))
self.assertTrue(all(var_name in shapes for var_name in variable_names))
self.assertSequenceEqual(variable_names[:3], __a)
self.assertSequenceEqual(variable_names[3:], __a)
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
self.assertDictEqual(shapes["output_1"], {0: "batch"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ["input_ids", "attention_mask", "token_type_ids"]
_lowerCAmelCase : Optional[Any] = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
_lowerCAmelCase : Union[str, Any] = ensure_valid_input(FuncContiguousArgs(), __a, __a)
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__a), 3)
# Should have exactly the same input names
self.assertEqual(set(__a), set(__a))
        # Parameters should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__a, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))
        # Generated args are interleaved with other args (for instance parameter "past" in GPT2)
_lowerCAmelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs(), __a, __a)
        # Should have exactly one arg (all args before the missing "some_other_args")
self.assertEqual(len(__a), 1)
self.assertEqual(len(__a), 1)
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens["input_ids"])
self.assertEqual(ordered_input_names[0], "input_ids")
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
| 658 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.model"}
_snake_case = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
_snake_case = {
"google/rembert": 256,
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self, __a, __a=False, __a=True, __a=True, __a="[CLS]", __a="[SEP]", __a="[UNK]", __a="[SEP]", __a="[PAD]", __a="[CLS]", __a="[MASK]", **__a, ):
'''simple docstring'''
super().__init__(
do_lower_case=__a, remove_space=__a, keep_accents=__a, bos_token=__a, eos_token=__a, unk_token=__a, sep_token=__a, pad_token=__a, cls_token=__a, mask_token=__a, **__a, )
_lowerCAmelCase : str = do_lower_case
_lowerCAmelCase : Union[str, Any] = remove_space
_lowerCAmelCase : Any = keep_accents
_lowerCAmelCase : Any = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor()
self.sp_model.Load(__a)
@property
def snake_case__ ( self):
'''simple docstring'''
return len(self.sp_model)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {self.convert_ids_to_tokens(__a): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = d
_lowerCAmelCase : str = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
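    # SentencePieceProcessor instances are not picklable, so __getstate__ above
    # drops `sp_model` and __setstate__ reloads it from `self.vocab_file`.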
def snake_case__ ( self, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(__a)
return pieces
def snake_case__ ( self, __a):
'''simple docstring'''
return self.sp_model.PieceToId(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
return self.sp_model.IdToPiece(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.sp_model.decode_pieces(__a)
return out_string
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Tuple = [self.sep_token_id]
_lowerCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
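    # Layout sketch: a single sequence becomes [CLS] A [SEP]; a pair becomes
    # [CLS] A [SEP] B [SEP], matching the return statements above.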
def snake_case__ ( self, __a, __a = None, __a = False):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model.")
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__a)) + [1] + ([0] * len(__a)) + [1]
return [1] + ([0] * len(__a)) + [1]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = [self.sep_token_id]
_lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if not os.path.isdir(__a):
logger.error("Vocabulary path ({}) should be a directory".format(__a))
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
__a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__a):
copyfile(self.vocab_file, __a)
return (out_vocab_file,)
| 719 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = XLNetConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : Any = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : Any = finetuning_task
_lowerCAmelCase : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Union[str, Any] = XLNetForSequenceClassification(_lowerCamelCase )
elif "squad" in finetuning_task:
_lowerCAmelCase : Union[str, Any] = finetuning_task
_lowerCAmelCase : Any = XLNetForQuestionAnswering(_lowerCamelCase )
else:
_lowerCAmelCase : Union[str, Any] = XLNetLMHeadModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(F"Save PyTorch model to {os.path.abspath(_lowerCamelCase )}" )
torch.save(model.state_dict() , _lowerCamelCase )
print(F"Save configuration file to {os.path.abspath(_lowerCamelCase )}" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
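    # Example invocation (hypothetical paths; the flags are the ones defined above):
    #   python convert_xlnet_checkpoint.py \
    #       --tf_checkpoint_path ./xlnet_model.ckpt \
    #       --xlnet_config_file ./xlnet_config.json \
    #       --pytorch_dump_folder_path ./xlnet_pt \
    #       --finetuning_task sts-b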
| 658 | 0 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
_snake_case = trt.Logger(trt.Logger.WARNING)
_snake_case = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
_snake_case = logging.getLogger(__name__)
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
_snake_case = parser.parse_args()
if args.tokenizer_name:
_snake_case = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
_snake_case = args.per_device_eval_batch_size
_snake_case = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
_snake_case = True
_snake_case = "temp_engine/bert-fp32.engine"
if args.fp16:
    _snake_case = "temp_engine/bert-fp16.engine"
if args.int8:
    _snake_case = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
_snake_case = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
_snake_case = [network.get_input(i) for i in range(network.num_inputs)]
_snake_case = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
_snake_case = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
_snake_case = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
_snake_case = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
    _lowerCAmelCase : int = np.asarray(inputs["input_ids"] , dtype=np.int32 )
    _lowerCAmelCase : List[str] = np.asarray(inputs["attention_mask"] , dtype=np.int32 )
    _lowerCAmelCase : Dict = np.asarray(inputs["token_type_ids"] , dtype=np.int32 )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _lowerCamelCase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _lowerCamelCase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _lowerCamelCase )
# start time
_lowerCAmelCase : Any = time.time()
# Run inference
context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(_lowerCamelCase ), int(_lowerCamelCase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
cuda.memcpy_dtoh_async(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
_lowerCAmelCase : Union[str, Any] = time.time()
_lowerCAmelCase : Any = end_time - start_time
    _lowerCAmelCase : List[Any] = (h_output0, h_output1)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
_snake_case = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_snake_case = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
_snake_case = raw_datasets["validation"].column_names
_snake_case = "question" if "question" in column_names else column_names[0]
_snake_case = "context" if "context" in column_names else column_names[1]
_snake_case = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
_snake_case = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the '''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
_snake_case = min(args.max_seq_length, tokenizer.model_max_length)
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
_lowerCAmelCase : Union[str, Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="only_second" if pad_on_right else "only_first" , max_length=_lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , padding="max_length" , )
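    # Illustrative numbers (assumed): with max_seq_length=384 and doc_stride=128,
    # a long context is split into several overlapping features, each sharing
    # roughly 128 tokens with its neighbor.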
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
_lowerCAmelCase : List[Any] = tokenized_examples.pop("overflow_to_sample_mapping" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
_lowerCAmelCase : str = []
for i in range(len(tokenized_examples["input_ids"] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
_lowerCAmelCase : Optional[int] = tokenized_examples.sequence_ids(_lowerCamelCase )
_lowerCAmelCase : str = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
_lowerCAmelCase : Any = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
_lowerCAmelCase : int = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i] )
]
return tokenized_examples
_snake_case = raw_datasets["validation"]
# Validation Feature Creation
_snake_case = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
_snake_case = default_data_collator
_snake_case = eval_dataset.remove_columns(["example_id", "offset_mapping"])
_snake_case = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="eval" ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = postprocess_qa_predictions(
examples=_lowerCamelCase , features=_lowerCamelCase , predictions=_lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
_lowerCAmelCase : Any = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
_lowerCAmelCase : Any = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
_lowerCAmelCase : int = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_lowerCamelCase , label_ids=_lowerCamelCase )
_snake_case = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def A ( _lowerCamelCase ):
'''simple docstring'''
return trt.volume(engine.get_binding_shape(_lowerCamelCase ) ) * engine.get_binding_dtype(_lowerCamelCase ).itemsize
# Allocate device memory for inputs and outputs.
_snake_case = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    _snake_case = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    _snake_case = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    _snake_case = cuda.mem_alloc(h_output0.nbytes)
    _snake_case = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
_snake_case = cuda.Stream()
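    # Buffer sketch: bindings 3 and 4 are assumed to hold the QA head's start/end
    # logits, so each gets one pagelocked host buffer and one device buffer, and
    # the stream above carries the async H2D/D2H copies issued in model_infer.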
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
_snake_case = 0.0
_snake_case = 0
_snake_case = timeit.default_timer()
_snake_case = None
for step, batch in enumerate(eval_dataloader):
        _snake_case, _snake_case = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
_snake_case, _snake_case = outputs
_snake_case = torch.tensor(start_logits)
_snake_case = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
_snake_case = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
_snake_case = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
_snake_case = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
_snake_case = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
_snake_case = nested_truncate(all_preds, len(eval_dataset))
_snake_case = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
logger.info("Total Number of Inference = %d", niter)
_snake_case = post_processing_function(eval_examples, eval_dataset, all_preds)
_snake_case = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
| 720 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 658 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_snake_case = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 16_000 ):
'''simple docstring'''
_lowerCAmelCase : str = int(round(sample_rate * max_length ) )
if len(_lowerCamelCase ) <= sample_length:
return wav
_lowerCAmelCase : str = randint(0 , len(_lowerCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
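# Usage sketch (positional args: wav, max_length, sample_rate): calling the
# helper above as A(wav, 20, 16_000) keeps a random 20 s window, or returns the
# whole clip unchanged when it is already shorter than that.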
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(default=a , metadata={'help': 'Name of a dataset from the datasets package'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'A file containing the training audio paths and labels.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'A file containing the validation audio paths and labels.'})
lowerCamelCase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowerCamelCase__ = field(
default='validation' , metadata={
'help': (
            'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowerCamelCase__ = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
lowerCamelCase__ = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCamelCase__ = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
lowerCamelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Name or path of preprocessor config.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
lowerCamelCase__ = field(
default=a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
lowerCamelCase__ = field(
default=a , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def snake_case__ ( self):
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", __a, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`.")
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _lowerCamelCase , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCAmelCase : str = training_args.get_process_log_level()
logger.setLevel(_lowerCamelCase )
transformers.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_lowerCAmelCase : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase : Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
_lowerCAmelCase : Tuple = DatasetDict()
_lowerCAmelCase : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--label_column_name` to the correct text column - one of "
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_lowerCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_lowerCAmelCase : Tuple = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_lowerCAmelCase : Dict = feature_extractor.model_input_names[0]
def train_transforms(_lowerCamelCase ):
_lowerCAmelCase : Dict = []
for audio in batch[data_args.audio_column_name]:
_lowerCAmelCase : Tuple = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = feature_extractor(_lowerCamelCase , sampling_rate=feature_extractor.sampling_rate )
_lowerCAmelCase : str = {model_input_name: inputs.get(_lowerCamelCase )}
_lowerCAmelCase : Any = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_lowerCamelCase ):
_lowerCAmelCase : List[str] = [audio["array"] for audio in batch[data_args.audio_column_name]]
_lowerCAmelCase : Union[str, Any] = feature_extractor(_lowerCamelCase , sampling_rate=feature_extractor.sampling_rate )
_lowerCAmelCase : Tuple = {model_input_name: inputs.get(_lowerCamelCase )}
_lowerCAmelCase : Dict = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_lowerCAmelCase : Optional[int] = raw_datasets["train"].features[data_args.label_column_name].names
    _lowerCAmelCase , _lowerCAmelCase : Optional[int] = {}, {}
for i, label in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = str(_lowerCamelCase )
_lowerCAmelCase : List[Any] = label
# Load the accuracy metric from the datasets package
_lowerCAmelCase : Optional[int] = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_lowerCamelCase , references=eval_pred.label_ids )
_lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(
    model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , label2id=_lowerCamelCase , id2label=_lowerCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : Union[str, Any] = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_lowerCAmelCase : Optional[int] = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_lowerCamelCase , output_all_columns=_lowerCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_lowerCAmelCase : List[Any] = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_lowerCamelCase , output_all_columns=_lowerCamelCase )
# Initialize our trainer
_lowerCAmelCase : str = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , )
# Training
if training_args.do_train:
_lowerCAmelCase : List[Any] = None
if training_args.resume_from_checkpoint is not None:
_lowerCAmelCase : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCAmelCase : Optional[int] = last_checkpoint
_lowerCAmelCase : int = trainer.train(resume_from_checkpoint=_lowerCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_lowerCAmelCase : List[str] = trainer.evaluate()
trainer.log_metrics("eval" , _lowerCamelCase )
trainer.save_metrics("eval" , _lowerCamelCase )
# Write model card and (optionally) push to hub
_lowerCAmelCase : int = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCamelCase )
else:
trainer.create_model_card(**_lowerCamelCase )
if __name__ == "__main__":
main()
| 721 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = OmegaConf.load(_lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) )
return config
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if conf_path is None:
_lowerCAmelCase : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml"
_lowerCAmelCase : Tuple = load_config(_lowerCamelCase , display=_lowerCamelCase )
_lowerCAmelCase : str = VQModel(**config.model.params )
if ckpt_path is None:
_lowerCAmelCase : Optional[int] = "./model_checkpoints/vqgan_only.pt"
_lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
if ".ckpt" in ckpt_path:
_lowerCAmelCase : List[Any] = sd["state_dict"]
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
model.to(_lowerCamelCase )
del sd
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = model.encode(_lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
_lowerCAmelCase : int = model.decode(_lowerCamelCase )
return xrec
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = string.rsplit("." , 1 )
if reload:
_lowerCAmelCase : Dict = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
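# Example: get_obj_from_str("torch.nn.Linear") splits on the last dot, imports
# `torch.nn`, and returns the `Linear` class via getattr.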
def A ( _lowerCamelCase ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : str = instantiate_from_config(_lowerCamelCase )
if sd is not None:
model.load_state_dict(_lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if ckpt:
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : int = pl_sd["global_step"]
print(F"loaded model from global step {global_step}." )
else:
_lowerCAmelCase : Optional[int] = {"state_dict": None}
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"]
return model, global_step
| 658 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = UnCLIPImageVariationPipeline
lowerCamelCase__ = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowerCamelCase__ = IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase__ = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowerCamelCase__ = False
@property
def snake_case__ ( self):
'''simple docstring'''
return 32
@property
def snake_case__ ( self):
'''simple docstring'''
return 32
@property
def snake_case__ ( self):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case__ ( self):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case__ ( self):
'''simple docstring'''
return 100
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModelWithProjection(__a)
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : str = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, )
return CLIPVisionModelWithProjection(__a)
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Union[str, Any] = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
_lowerCAmelCase : Union[str, Any] = UnCLIPTextProjModel(**__a)
return model
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Optional[int] = {
"sample_size": 32,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
_lowerCAmelCase : Tuple = UNetaDConditionModel(**__a)
return model
@property
def snake_case__ ( self):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : str = UNetaDModel(**self.dummy_super_res_kwargs)
return model
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(1)
_lowerCAmelCase : Union[str, Any] = UNetaDModel(**self.dummy_super_res_kwargs)
return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing the image embedding directly is identical to passing the image
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
| 700 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
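# Note: RoCBert augments BERT with pronunciation (pinyin) and glyph-shape embeddings so the
# model stays robust to adversarial character substitutions in Chinese text; the extra
# `pronunciation_*` / `shape_*` options below configure those two embedding tables.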
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 658 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
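# Evaluation script for RAG checkpoints: in "e2e" mode it scores generated answers with
# EM/F1 against gold answers, in "retrieval" mode it scores retrieved document titles
# with precision@k against a gold provenance file.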
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # score the prediction against every acceptable answer and keep the best match
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith("\""):
            title = title[1:]
        if title.endswith("\""):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file: "
            "qa - a single line in the following format: question [tab] answer_list; "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
| 701 |
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = "▁"
_snake_case = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowerCAmelCase : List[Any] = {"input_ids": [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = _lowerCAmelCase  # alias for the long literal above, which keeps its original name
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
| 702 |
def hexagonal_numbers(length: int) -> list:
    """Return the first `length` hexagonal numbers; the n-th is h(n) = n * (2n - 1).

    >>> hexagonal_numbers(5)
    [0, 1, 6, 15, 28]
    """
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)
def __call__( self):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
    def flatten(self):
        """Flatten the Translation feature into one string Value per language."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
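# Example: datasets.Features({"translation": Translation(languages=["en", "fr"])})
# encodes examples of the form {"en": "the cat", "fr": "le chat"}.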
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None
def __call__( self):
'''simple docstring'''
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}
    def flatten(self):
        """Flatten the TranslationVariableLanguages feature into language/translation sequences."""
        from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
| 703 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
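# Both routines below treat `features` as a (num_features, num_samples) matrix,
# i.e. each *column* is one sample; `labels` assigns a class id to every column.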
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a column vector of shape (n, 1)."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the within-class covariance (scatter) matrix."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the between-class covariance (scatter) matrix."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        class_sample_count = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += class_sample_count * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = class_sample_count * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project `features` onto the top `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), then keep only the first `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project `features` onto `dimensions` discriminant directions (requires dimensions < classes)."""
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None):
        """Create a Fenwick (binary indexed) tree from `arr`, or an empty one of `size`."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        """Build the tree in O(n) by pushing each node's partial sum into its parent."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Recover the original array in O(n) by undoing `init`."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        # parent in the update direction: add the lowest set bit
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        # parent in the query direction: remove the lowest set bit
        return index - (index & (-index))

    def add(self, index, value):
        """Add `value` to the element at `index` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """Set the element at `index` to `value` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Return the sum over the half-open prefix [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Return the sum over [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        """Return the element at `index` in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Return the largest index whose cumulative sum is <= `value` (-1 if none), via binary lifting."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
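# Example (values worked out by hand) over arr = [1, 2, 3, 4]:
#   FenwickTree(arr=[1, 2, 3, 4]).prefix(3)   -> 6  (1 + 2 + 3)
#   FenwickTree(arr=[1, 2, 3, 4]).query(1, 3) -> 5  (2 + 3)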
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the "Cited by" count shown for the first Google Scholar result."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    # the third anchor of the result-footer ("gs_fl") row is the "Cited by N" link
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
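# `_LazyModule` (installed at the bottom of this file) defers the heavy torch imports
# declared in `_import_structure` until one of these attributes is actually accessed.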
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below `limit` that produces the longest Collatz chain.

    Chain lengths of previously seen numbers are cached in `counters`, so each
    sequence only has to be walked until it reaches a known value.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for start in range(2, limit):
        counter = 0
        number = start

        while True:
            if number in counters:
                # reuse the cached chain length and stop walking
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if start not in counters:
            counters[start] = counter

        if counter > pre_counter:
            largest_number = start
            pre_counter = counter

    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 658 | 0 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
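# Converts a trax Reformer checkpoint (a pickled dict of numpy weight arrays) into a
# Hugging Face `ReformerModelWithLMHead` by copying each array into the matching torch module.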
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 706 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
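# Both variants share the 5B VQ-VAE and the first two priors; only the top-level
# (level-2) prior differs between the 1b-lyrics and 5b-lyrics models.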
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
    # handle mismatched shapes
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
        print(F"{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
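        # the first state dict holds the VQ-VAE weights; the remaining ones map onto the priors in reverse order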
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 0 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = checkpoints.load_tax_checkpoint(_lowerCamelCase )
_lowerCAmelCase : Dict = flatten_dict(_lowerCamelCase )
return flax_params
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : str = {
"token_embedder": "embeddings",
"encoder_norm": "layernorm",
"kernel": "weight",
".out": ".output",
"scale": "weight",
"embedders_0.pos_embedding": "row_embedder.weight",
"embedders_1.pos_embedding": "column_embedder.weight",
}
_lowerCAmelCase : int = {
"query": "attention.query",
"key": "attention.key",
"value": "attention.value",
"output.dense": "output",
"encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
"pre_self_attention_layer_norm": "self_attention.layer_norm",
"pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
"mlp.": "mlp.DenseReluDense.",
"pre_mlp_layer_norm": "mlp.layer_norm",
"self_attention.o": "self_attention.attention.o",
"decoder.embeddings.embedding": "decoder.embed_tokens.weight",
"decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
"decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
"decoder.logits_dense.weight": "decoder.lm_head.weight",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
_lowerCAmelCase : Optional[int] = ".".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
_lowerCAmelCase : Any = new_key.replace(_lowerCamelCase , _lowerCamelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
_lowerCAmelCase : List[Any] = new_key.replace(_lowerCamelCase , _lowerCamelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
_lowerCAmelCase : Dict = re.sub(r"layers_(\d+)" , r"layer.\1" , _lowerCamelCase )
_lowerCAmelCase : Dict = new_key.replace("encoder" , "encoder.encoder" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
_lowerCAmelCase : Optional[int] = re.sub(r"layers_(\d+)" , r"layer.\1" , _lowerCamelCase )
_lowerCAmelCase : Dict = flax_dict[key]
_lowerCAmelCase : Optional[int] = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
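        # Flax stores linear kernels transposed relative to PyTorch, so transpose everything except embeddings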
if ("embed_tokens" not in key) and ("embedder" not in key):
_lowerCAmelCase : Any = torch.from_numpy(converted_dict[key].T )
else:
_lowerCAmelCase : List[str] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : str = get_flax_param(_lowerCamelCase )
if not use_large:
_lowerCAmelCase : Optional[Any] = PixaStructVisionConfig()
_lowerCAmelCase : Optional[int] = PixaStructTextConfig()
else:
_lowerCAmelCase : Optional[int] = PixaStructVisionConfig(
hidden_size=1_536 , d_ff=3_968 , num_attention_heads=24 , num_hidden_layers=18 )
_lowerCAmelCase : Dict = PixaStructTextConfig(hidden_size=1_536 , d_ff=3_968 , num_heads=24 , num_layers=18 )
_lowerCAmelCase : Union[str, Any] = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_lowerCamelCase )
_lowerCAmelCase : List[str] = PixaStructForConditionalGeneration(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = rename_and_convert_flax_params(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
_lowerCAmelCase : Tuple = PixaStructImageProcessor()
_lowerCAmelCase : Dict = PixaStructProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
if use_large:
_lowerCAmelCase : Tuple = 4_096
_lowerCAmelCase : Union[str, Any] = True
# mkdir if needed
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
print("Model saved in {}".format(_lowerCamelCase ) )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the checkpoint is a VQA model.")
_snake_case = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
    args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 707 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
_lowerCAmelCase : str = ksize + 1
_lowerCAmelCase : List[str] = np.zeros((ksize, ksize) , dtype=np.floataa )
    # compute each kernel value
for y in range(_lowerCamelCase ):
for x in range(_lowerCamelCase ):
# distance from center
_lowerCAmelCase : int = x - ksize // 2
_lowerCAmelCase : Dict = y - ksize // 2
            # degrees to radians
_lowerCAmelCase : List[Any] = theta / 180 * np.pi
_lowerCAmelCase : int = np.cos(_theta )
_lowerCAmelCase : Optional[int] = np.sin(_theta )
# get kernel x
_lowerCAmelCase : int = cos_theta * px + sin_theta * py
# get kernel y
_lowerCAmelCase : str = -sin_theta * px + cos_theta * py
# fill kernel
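            # Gabor function: Gaussian envelope modulated by a cosine carrier along the rotated x-axis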
_lowerCAmelCase : Union[str, Any] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread("../image_data/lena.jpg")
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case = out / out.max() * 255
_snake_case = out.astype(np.uinta)
imshow("Original", gray)
    imshow("Gabor filter with 11x11 mask and 6 directions", out)
waitKey(0)
| 658 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def A ( _lowerCamelCase ):
'''simple docstring'''
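    # translate original checkpoint key names into the Hugging Face YOSO naming scheme, one substring at a time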
if "model" in orig_key:
_lowerCAmelCase : Union[str, Any] = orig_key.replace("model." , "" )
if "norm1" in orig_key:
_lowerCAmelCase : Any = orig_key.replace("norm1" , "attention.output.LayerNorm" )
if "norm2" in orig_key:
_lowerCAmelCase : int = orig_key.replace("norm2" , "output.LayerNorm" )
if "norm" in orig_key:
_lowerCAmelCase : Optional[int] = orig_key.replace("norm" , "LayerNorm" )
if "transformer" in orig_key:
_lowerCAmelCase : Optional[int] = orig_key.split("." )[0].split("_" )[-1]
_lowerCAmelCase : Dict = orig_key.replace(F"transformer_{layer_num}" , F"encoder.layer.{layer_num}" )
if "mha.attn" in orig_key:
_lowerCAmelCase : Dict = orig_key.replace("mha.attn" , "attention.self" )
if "mha" in orig_key:
_lowerCAmelCase : str = orig_key.replace("mha" , "attention" )
if "W_q" in orig_key:
_lowerCAmelCase : Union[str, Any] = orig_key.replace("W_q" , "self.query" )
if "W_k" in orig_key:
_lowerCAmelCase : Any = orig_key.replace("W_k" , "self.key" )
if "W_v" in orig_key:
_lowerCAmelCase : Union[str, Any] = orig_key.replace("W_v" , "self.value" )
if "ff1" in orig_key:
_lowerCAmelCase : List[str] = orig_key.replace("ff1" , "intermediate.dense" )
if "ff2" in orig_key:
_lowerCAmelCase : Optional[Any] = orig_key.replace("ff2" , "output.dense" )
if "ff" in orig_key:
_lowerCAmelCase : List[Any] = orig_key.replace("ff" , "output.dense" )
if "mlm_class" in orig_key:
_lowerCAmelCase : Dict = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
if "mlm" in orig_key:
_lowerCAmelCase : Any = orig_key.replace("mlm" , "cls.predictions.transform" )
if "cls" not in orig_key:
_lowerCAmelCase : Any = "yoso." + orig_key
return orig_key
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCAmelCase : List[Any] = orig_state_dict.pop(_lowerCamelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
_lowerCAmelCase : Any = val
_lowerCAmelCase : Any = orig_state_dict["cls.predictions.decoder.bias"]
_lowerCAmelCase : Optional[int] = torch.arange(_lowerCamelCase ).expand((1, -1) ) + 2
return orig_state_dict
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )["model_state_dict"]
_lowerCAmelCase : Optional[int] = YosoConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : List[str] = YosoForMaskedLM(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , _lowerCamelCase )
print(model.load_state_dict(_lowerCamelCase ) )
model.eval()
model.save_pretrained(_lowerCamelCase )
    print(F"Checkpoint successfully converted. Model saved at {pytorch_dump_path}" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 708 |
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
for i in range(1 , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = collection[i]
_lowerCAmelCase : str = 0
_lowerCAmelCase : Union[str, Any] = i - 1
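        # binary search for the insertion point of val within the already-sorted prefix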
while low <= high:
_lowerCAmelCase : List[str] = (low + high) // 2
if val < collection[mid]:
_lowerCAmelCase : Optional[int] = mid - 1
else:
_lowerCAmelCase : List[str] = mid + 1
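        # shift the larger elements one slot to the right and drop val into the gap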
for j in range(_lowerCamelCase , _lowerCamelCase , -1 ):
_lowerCAmelCase : int = collection[j - 1]
_lowerCAmelCase : Optional[int] = val
return collection
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 658 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase_ :
def __init__( self, __a, __a=2, __a=32, __a=16, __a=3, __a=True, __a=True, __a=32, __a=4, __a=[0, 1, 2, 3], __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=0.02, __a=3, __a=[1, 384, 24, 24], __a=True, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Optional[int] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Tuple = use_labels
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : int = backbone_out_indices
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Dict = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Any = num_labels
_lowerCAmelCase : Union[str, Any] = backbone_featmap_shape
_lowerCAmelCase : List[str] = scope
_lowerCAmelCase : str = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Any = (image_size // patch_size) ** 2
_lowerCAmelCase : Optional[int] = num_patches + 1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__a, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=__a, backbone_featmap_shape=self.backbone_featmap_shape, )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = DPTModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Optional[Any] = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Tuple = DPTForDepthEstimation(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : int = model(__a)
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_labels
_lowerCAmelCase : str = DPTForSemanticSegmentation(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Tuple = model(__a, labels=__a)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase : List[str] = config_and_inputs
_lowerCAmelCase : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase__ = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = DPTModelTester(self)
_lowerCAmelCase : int = ConfigTester(self, config_class=__a, has_text_modality=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(__a)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a, nn.Linear))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(__a)
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Tuple = [*signature.parameters.keys()]
_lowerCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1], __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a)
def snake_case__ ( self):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = True
if model_class in get_values(__a):
continue
_lowerCAmelCase : Any = model_class(__a)
model.to(__a)
model.train()
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(__a, __a, return_labels=__a)
_lowerCAmelCase : int = model(**__a).loss
loss.backward()
def snake_case__ ( self):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = False
_lowerCAmelCase : Any = True
if model_class in get_values(__a) or not model_class.supports_gradient_checkpointing:
continue
_lowerCAmelCase : Optional[Any] = model_class(__a)
model.to(__a)
model.gradient_checkpointing_enable()
model.train()
_lowerCAmelCase : List[str] = self._prepare_for_class(__a, __a, return_labels=__a)
_lowerCAmelCase : List[Any] = model(**__a).loss
loss.backward()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Union[str, Any] = _config_zero_init(__a)
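        # with a zeroed-out initializer range, properly initialized parameters should be exactly 0.0 or 1.0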
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(config=__a)
# Skip the check for the backbone
_lowerCAmelCase : str = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_lowerCAmelCase : Dict = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def snake_case__ ( self):
'''simple docstring'''
pass
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_lowerCAmelCase : List[Any] = DPTModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = "add"
with self.assertRaises(__a):
_lowerCAmelCase : Dict = DPTForDepthEstimation(__a)
def A ( ):
'''simple docstring'''
_lowerCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
_lowerCAmelCase : Optional[int] = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(__a)
_lowerCAmelCase : Dict = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=__a, return_tensors="pt").to(__a)
# forward pass
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**__a)
_lowerCAmelCase : Optional[Any] = outputs.predicted_depth
# verify the predicted depth
_lowerCAmelCase : List[str] = torch.Size((1, 384, 384))
self.assertEqual(predicted_depth.shape, __a)
_lowerCAmelCase : Tuple = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]]).to(__a)
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, __a, atol=1E-4))
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
| 658 | 0 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase_ :
def __init__( self, __a, __a=2, __a=3, __a=4, __a=2, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=36, __a=2, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=6, __a=6, __a=3, __a=4, __a=None, __a=1000, ):
'''simple docstring'''
_lowerCAmelCase : int = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Tuple = patch_size
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : str = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : List[str] = type_sequence_label_size
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Dict = coordinate_size
_lowerCAmelCase : Any = shape_size
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : int = num_choices
_lowerCAmelCase : Any = scope
_lowerCAmelCase : str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCAmelCase : int = text_seq_length
_lowerCAmelCase : str = (image_size // patch_size) ** 2 + 1
_lowerCAmelCase : Optional[Any] = self.text_seq_length + self.image_seq_length
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
_lowerCAmelCase : Dict = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : str = bbox[i, j, 3]
_lowerCAmelCase : Tuple = bbox[i, j, 1]
_lowerCAmelCase : Dict = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : List[Any] = bbox[i, j, 2]
_lowerCAmelCase : Any = bbox[i, j, 0]
_lowerCAmelCase : Tuple = tmp_coordinate
_lowerCAmelCase : Dict = tf.constant(__a)
_lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCAmelCase : int = None
if self.use_input_mask:
_lowerCAmelCase : str = random_attention_mask([self.batch_size, self.text_seq_length])
_lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[Any] = None
if self.use_labels:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
_lowerCAmelCase : Optional[Any] = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case__ ( self, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = TFLayoutLMvaModel(config=__a)
# text + image
_lowerCAmelCase : Tuple = model(__a, pixel_values=__a, training=__a)
_lowerCAmelCase : Optional[Any] = model(
__a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, training=__a, )
_lowerCAmelCase : List[str] = model(__a, bbox=__a, pixel_values=__a, training=__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
# text only
_lowerCAmelCase : int = model(__a, training=__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
_lowerCAmelCase : Any = model({"pixel_values": pixel_values}, training=__a)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : List[Any] = TFLayoutLMvaForSequenceClassification(config=__a)
_lowerCAmelCase : List[str] = model(
__a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, labels=__a, training=__a, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : str = TFLayoutLMvaForTokenClassification(config=__a)
_lowerCAmelCase : Any = model(
__a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, labels=__a, training=__a, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 2
_lowerCAmelCase : List[str] = TFLayoutLMvaForQuestionAnswering(config=__a)
_lowerCAmelCase : Any = model(
__a, bbox=__a, pixel_values=__a, attention_mask=__a, token_type_ids=__a, start_positions=__a, end_positions=__a, training=__a, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
(_lowerCAmelCase) : Tuple = config_and_inputs
_lowerCAmelCase : Optional[Any] = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self, __a, __a, __a, __a, __a):
'''simple docstring'''
return True
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : int = copy.deepcopy(__a)
if model_class in get_values(__a):
_lowerCAmelCase : int = {
k: tf.tile(tf.expand_dims(__a, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(__a, tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__a):
_lowerCAmelCase : Dict = tf.ones(self.model_tester.batch_size, dtype=tf.intaa)
elif model_class in get_values(__a):
_lowerCAmelCase : List[Any] = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
_lowerCAmelCase : Any = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
elif model_class in get_values(__a):
_lowerCAmelCase : Union[str, Any] = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
elif model_class in get_values(__a):
_lowerCAmelCase : Optional[int] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.intaa)
return inputs_dict
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFLayoutLMvaModelTester(self)
_lowerCAmelCase : Any = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class(__a)
if getattr(__a, "hf_compute_loss", __a):
# The number of elements in the loss should be the same as the number of elements in the label
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
_lowerCAmelCase : Union[str, Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=__a)[0]
]
_lowerCAmelCase : Any = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_lowerCAmelCase : str = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
_lowerCAmelCase : List[str] = prepared_for_class.pop("input_ids")
_lowerCAmelCase : Tuple = model(__a, **__a)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
# Test that model correctly compute the loss when we mask some positions
_lowerCAmelCase : Dict = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
_lowerCAmelCase : Union[str, Any] = prepared_for_class.pop("input_ids")
if "labels" in prepared_for_class:
_lowerCAmelCase : Union[str, Any] = prepared_for_class["labels"].numpy()
if len(labels.shape) > 1 and labels.shape[1] != 1:
_lowerCAmelCase : int = -100
_lowerCAmelCase : Optional[int] = tf.convert_to_tensor(__a)
_lowerCAmelCase : int = model(__a, **__a)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
self.assertTrue(not np.any(np.isnan(loss.numpy())))
# Test that model correctly compute the loss with a dict
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
_lowerCAmelCase : int = model(__a)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
# Test that model correctly compute the loss with a tuple
_lowerCAmelCase : Tuple = self._prepare_for_class(inputs_dict.copy(), __a, return_labels=__a)
# Get keys that were added with the _prepare_for_class function
_lowerCAmelCase : Any = prepared_for_class.keys() - inputs_dict.keys()
_lowerCAmelCase : List[str] = inspect.signature(model.call).parameters
_lowerCAmelCase : List[Any] = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
_lowerCAmelCase : str = {0: "input_ids"}
for label_key in label_keys:
_lowerCAmelCase : int = signature_names.index(__a)
_lowerCAmelCase : str = label_key
_lowerCAmelCase : Tuple = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
_lowerCAmelCase : Optional[Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
_lowerCAmelCase : Tuple = prepared_for_class[value]
_lowerCAmelCase : Any = tuple(__a)
# Send to model
_lowerCAmelCase : Tuple = model(tuple_input[:-1])[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
def snake_case__ ( self):
'''simple docstring'''
(
_lowerCAmelCase
) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__a, __a, __a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
(
_lowerCAmelCase
) : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : Union[str, Any] = type
self.model_tester.create_and_check_model(__a, __a, __a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
(
_lowerCAmelCase
) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__a, __a, __a, __a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
(
_lowerCAmelCase
) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__a, __a, __a, __a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
(
_lowerCAmelCase
) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__a, __a, __a, __a, __a, __a, __a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[str] = TFLayoutLMvaModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__a) if is_vision_available() else None
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
_lowerCAmelCase : str = self.default_image_processor
_lowerCAmelCase : Dict = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=__a, return_tensors="tf").pixel_values
_lowerCAmelCase : Tuple = tf.constant([[1, 2]])
_lowerCAmelCase : Dict = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
# forward pass
_lowerCAmelCase : int = model(input_ids=__a, bbox=__a, pixel_values=__a, training=__a)
# verify the logits
_lowerCAmelCase : List[Any] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape, __a)
_lowerCAmelCase : int = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]])
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], __a, atol=1E-4))
| 710 |
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
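    # naive top-down recursion; ordered sequences that sum to target are counted separately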
def count_of_possible_combinations(_lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
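    # the same recursion, memoized in dp_array so each intermediate target is solved only once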
def count_of_possible_combinations_with_dp_array(
_lowerCamelCase , _lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase : Optional[int] = sum(
count_of_possible_combinations_with_dp_array(target - item , _lowerCamelCase )
for item in array )
_lowerCAmelCase : Any = answer
return answer
_lowerCAmelCase : List[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
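    # bottom-up DP: dp_array[t] holds the number of ordered combinations summing to t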
_lowerCAmelCase : List[Any] = [0] * (target + 1)
_lowerCAmelCase : List[str] = 1
for i in range(1 , target + 1 ):
for j in range(_lowerCamelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 658 | 0 |
from __future__ import annotations
from math import pi
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
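    # inductive reactance follows X_L = 2 * pi * f * L; the single zero argument marks the unknown to solve for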
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
import string
def A ( _lowerCamelCase ):
'''simple docstring'''
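    # brute force: try every possible Caesar key and print each candidate plaintext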
for key in range(len(string.ascii_uppercase ) ):
_lowerCAmelCase : str = ""
for symbol in message:
if symbol in string.ascii_uppercase:
_lowerCAmelCase : List[str] = string.ascii_uppercase.find(_lowerCamelCase )
_lowerCAmelCase : Dict = num - key
if num < 0:
_lowerCAmelCase : Dict = num + len(string.ascii_uppercase )
_lowerCAmelCase : Optional[Any] = translated + string.ascii_uppercase[num]
else:
_lowerCAmelCase : int = translated + symbol
print(F"Decryption using Key #{key}: {translated}" )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = input("Encrypted message: " )
_lowerCAmelCase : Dict = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 658 | 0 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = inspect.getfile(accelerate.test_utils)
lowerCamelCase__ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_cli.py'])
lowerCamelCase__ = ['accelerate', 'launch']
lowerCamelCase__ = Path.home() / '.cache/huggingface/accelerate'
lowerCamelCase__ = 'default_config.yaml'
lowerCamelCase__ = config_folder / config_file
lowerCamelCase__ = config_folder / '_default_config.yaml'
lowerCamelCase__ = Path('tests/test_configs')
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path)
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())
def snake_case__ ( self):
'''simple docstring'''
for config in sorted(self.test_config_path.glob("**/*.yaml")):
with self.subTest(config_file=__a):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(__a), self.test_file_path], env=os.environ.copy())
def snake_case__ ( self):
'''simple docstring'''
execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ = 'test-tpu'
lowerCamelCase__ = 'us-central1-a'
lowerCamelCase__ = 'ls'
lowerCamelCase__ = ['accelerate', 'tpu-config']
lowerCamelCase__ = 'cd /usr/share'
lowerCamelCase__ = 'tests/test_samples/test_command_file.sh'
lowerCamelCase__ = 'Running gcloud compute tpus tpu-vm ssh'
def snake_case__ ( self):
'''simple docstring'''
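        # with --debug the command is printed instead of executed, so the test can assert on the generated gcloud call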
_lowerCAmelCase : Any = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=__a)
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all", __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
], return_stdout=__a, )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all", __a, )
| 712 |
import requests
from bsa import BeautifulSoup
def A ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
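    # pair the page's headline/panel labels with their counter values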
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
_lowerCAmelCase : str = soup.findAll("h1" )
_lowerCAmelCase : Optional[int] = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 0 |
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = {1: 1}
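    # counters caches the Collatz chain length for every starting value seen so far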
for inputa in range(2 , _lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Tuple = counter
if counter > pre_counter:
_lowerCAmelCase : Union[str, Any] = inputa
_lowerCAmelCase : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 713 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
def __init__( self, __a, __a):
'''simple docstring'''
if len(__a) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1.")
_lowerCAmelCase : list[float] = list(__a)
_lowerCAmelCase : Any = degree
def __add__( self, __a):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_lowerCAmelCase : Dict = self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree, __a)
else:
_lowerCAmelCase : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree, __a)
def __sub__( self, __a):
'''simple docstring'''
return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self):
'''simple docstring'''
return Polynomial(self.degree, [-c for c in self.coefficients])
def __mul__( self, __a):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(__a)
return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
def snake_case__ ( self):
'''simple docstring'''
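        # differentiate term by term: d/dx (c_i * x^i) = i * c_i * x^(i - 1)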
_lowerCAmelCase : list[float] = [0] * self.degree
for i in range(self.degree):
_lowerCAmelCase : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, __a)
def snake_case__ ( self, __a = 0):
'''simple docstring'''
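        # integrate term by term: c_i * x^i becomes c_i / (i + 1) * x^(i + 1); the constant fills the new lowest term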
_lowerCAmelCase : list[float] = [0] * (self.degree + 2)
_lowerCAmelCase : Optional[Any] = constant
for i in range(self.degree + 1):
_lowerCAmelCase : Dict = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, __a)
def __eq__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self, __a):
'''simple docstring'''
return not self.__eq__(__a)
| 658 | 0 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCAmelCase_ :
def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=99, __a=32, __a=5, __a=4, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=50, __a=0.02, __a=True, __a=None, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : int = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Tuple = use_input_mask
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Dict = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : Optional[int] = initializer_range
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Union[str, Any] = scope
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : Dict = None
if self.use_input_mask:
_lowerCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length])
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case__ ( self):
'''simple docstring'''
return BertGenerationConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=__a, initializer_range=self.initializer_range, )
def snake_case__ ( self):
'''simple docstring'''
        ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[int] = self.prepare_config_and_inputs()
_lowerCAmelCase : int = True
_lowerCAmelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def snake_case__ ( self, __a, __a, __a, __a, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = BertGenerationEncoder(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Optional[int] = model(__a, attention_mask=__a)
_lowerCAmelCase : str = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Dict = BertGenerationEncoder(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Dict = model(
__a, attention_mask=__a, encoder_hidden_states=__a, encoder_attention_mask=__a, )
_lowerCAmelCase : List[str] = model(
__a, attention_mask=__a, encoder_hidden_states=__a, )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Dict = BertGenerationDecoder(config=__a).to(__a).eval()
# first forward pass
_lowerCAmelCase : List[str] = model(
__a, attention_mask=__a, encoder_hidden_states=__a, encoder_attention_mask=__a, use_cache=__a, )
_lowerCAmelCase : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_lowerCAmelCase : str = ids_tensor((self.batch_size, 3), config.vocab_size)
_lowerCAmelCase : Tuple = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
_lowerCAmelCase : List[Any] = torch.cat([input_ids, next_tokens], dim=-1)
_lowerCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask], dim=-1)
_lowerCAmelCase : Optional[int] = model(
__a, attention_mask=__a, encoder_hidden_states=__a, encoder_attention_mask=__a, output_hidden_states=__a, )["hidden_states"][0]
_lowerCAmelCase : Optional[int] = model(
__a, attention_mask=__a, encoder_hidden_states=__a, encoder_attention_mask=__a, past_key_values=__a, output_hidden_states=__a, )["hidden_states"][0]
# select random slice
_lowerCAmelCase : int = ids_tensor((1,), output_from_past.shape[-1]).item()
_lowerCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCAmelCase : Optional[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a, __a, atol=1E-3))
def snake_case__ ( self, __a, __a, __a, __a, *__a, ):
'''simple docstring'''
_lowerCAmelCase : Dict = BertGenerationDecoder(__a)
model.to(__a)
model.eval()
_lowerCAmelCase : Any = model(__a, attention_mask=__a, labels=__a)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.prepare_config_and_inputs()
_lowerCAmelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( a , a , a , unittest.TestCase):
lowerCamelCase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
lowerCamelCase__ = (BertGenerationDecoder,) if is_torch_available() else ()
lowerCamelCase__ = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = BertGenerationEncoderTester(self)
_lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : Optional[int] = "bert"
self.model_tester.create_and_check_model(__a, __a, __a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a)
def snake_case__ ( self):
'''simple docstring'''
        ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
_lowerCAmelCase : str = None
self.model_tester.create_and_check_model_as_decoder(
__a, __a, __a, __a, __a, __a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
self.assertIsNotNone(__a)
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
_lowerCAmelCase : Union[str, Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
with torch.no_grad():
_lowerCAmelCase : Any = model(__a)[0]
_lowerCAmelCase : Any = torch.Size([1, 8, 1024])
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Dict = torch.tensor(
[[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]])
self.assertTrue(torch.allclose(output[:, :3, :3], __a, atol=1E-4))
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
_lowerCAmelCase : List[str] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
with torch.no_grad():
_lowerCAmelCase : List[str] = model(__a)[0]
_lowerCAmelCase : str = torch.Size([1, 8, 5_0358])
self.assertEqual(output.shape, __a)
_lowerCAmelCase : Optional[Any] = torch.tensor(
[[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]])
self.assertTrue(torch.allclose(output[:, :3, :3], __a, atol=1E-4))
| 714 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=3_2000, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=True, __a="bi", __a=0.02, __a=1E-12, __a=0.1, __a=512, __a=None, __a=True, __a=False, __a=False, __a=-1, __a=False, __a="last", __a=True, __a="tanh", __a=0.1, __a=5, __a=5, __a=5, __a=1, __a=2, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Tuple = n_layer
_lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
_lowerCAmelCase : Optional[int] = d_model // n_head
_lowerCAmelCase : List[str] = ff_activation
_lowerCAmelCase : Tuple = d_inner
_lowerCAmelCase : List[Any] = untie_r
_lowerCAmelCase : List[str] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = dropout
_lowerCAmelCase : Optional[int] = mem_len
_lowerCAmelCase : Union[str, Any] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : List[str] = clamp_len
_lowerCAmelCase : Any = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : Optional[Any] = summary_activation
_lowerCAmelCase : Tuple = summary_last_dropout
_lowerCAmelCase : Union[str, Any] = start_n_top
_lowerCAmelCase : Optional[int] = end_n_top
_lowerCAmelCase : Tuple = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def snake_case__ ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 0 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
_snake_case = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
_snake_case = []
_snake_case = []
_snake_case = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
_snake_case = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
"emoji": True,
},
}
]
_snake_case = 0
for log in Path().glob("*.log"):
_snake_case = 0
with open(log, "r") as f:
for line in f:
_snake_case = json.loads(line)
if line.get("nodeid", "") != "":
_snake_case = line["nodeid"]
if line.get("duration", None) is not None:
_snake_case = f'''{line["duration"]:.4f}'''
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
_snake_case = []
log.unlink()
_snake_case = ""
_snake_case = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
_snake_case = []
_snake_case = {}
for test in failed_tests:
_snake_case = test[0].split("::")
_snake_case = data[0].split("/")[-1]
if data[0] not in filesafailed:
_snake_case = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
_snake_case = [test[0] for test in failed_table]
_snake_case = list(set(files))
# Count number of instances in failed_tests
_snake_case = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
_snake_case = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
_snake_case = "Too many failed tests, please see the full report in the Action results."
_snake_case = len(err) + 10
_snake_case = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
_snake_case = "No failed tests! 🤗"
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
_snake_case = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
_snake_case = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
_snake_case = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
_snake_case = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
_snake_case = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
_snake_case = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
_snake_case = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
_snake_case = row[0]
else:
_snake_case = ""
_snake_case = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
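
# A small, self-contained sketch of the custom `TableFormat` defined at the
# top of this script: it renders rows as `| a | b |` with no horizontal rules,
# which Slack mrkdwn code blocks display cleanly. The rows below are
# illustrative, and `hf_table_format` is assumed to be the format object
# built above.
_example_rows = [["tests/test_big_modeling.py", 2], ["tests/test_utils.py", 1]]
print(
    tabulate(
        _example_rows,
        headers=["Test Location", "Num Failed"],
        tablefmt=hf_table_format,
        stralign="right",
    )
)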
| 715 |
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 0 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
_snake_case = "base_with_context"
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
_lowerCAmelCase : Any = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
_lowerCAmelCase : Optional[Any] = weights[F"layers_{lyr_num}"]
_lowerCAmelCase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
_lowerCAmelCase : Dict = ly_weight["attention"]
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
_lowerCAmelCase : str = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
_lowerCAmelCase : List[str] = weights[F"layers_{lyr_num}"]
_lowerCAmelCase : List[Any] = ly_weight["attention"]
_lowerCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
_lowerCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
_lowerCAmelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
_lowerCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
_lowerCAmelCase : List[Any] = weights[F"layers_{lyr_num}"]
_lowerCAmelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
_lowerCAmelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
_lowerCAmelCase : int = ly_weight["self_attention"]
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
_lowerCAmelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
_lowerCAmelCase : int = ly_weight["MultiHeadDotProductAttention_0"]
_lowerCAmelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
_lowerCAmelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
_lowerCAmelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
_lowerCAmelCase : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
_lowerCAmelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = checkpoints.load_tax_checkpoint(args.checkpoint_path )
_lowerCAmelCase : Optional[int] = jnp.tree_util.tree_map(onp.array , _lowerCamelCase )
_lowerCAmelCase : int = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
_lowerCAmelCase : Any = os.path.join(args.checkpoint_path , ".." , "config.gin" )
_lowerCAmelCase : Dict = inference.parse_training_gin_file(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = inference.InferenceModel(args.checkpoint_path , _lowerCamelCase )
_lowerCAmelCase : str = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
_lowerCAmelCase : List[str] = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
_lowerCAmelCase : str = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
_lowerCAmelCase : Dict = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
_lowerCAmelCase : List[str] = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , _lowerCamelCase )
_lowerCAmelCase : int = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , _lowerCamelCase )
_lowerCAmelCase : List[str] = load_decoder(ta_checkpoint["target"]["decoder"] , _lowerCamelCase )
_lowerCAmelCase : List[str] = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
_lowerCAmelCase : int = SpectrogramDiffusionPipeline(
notes_encoder=_lowerCamelCase , continuous_encoder=_lowerCamelCase , decoder=_lowerCamelCase , scheduler=_lowerCamelCase , melgan=_lowerCamelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
_snake_case = parser.parse_args()
main(args)
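
# Why the loaders above apply `.T` to every `kernel`: a Flax/T5X Dense kernel
# is stored as (in_features, out_features), while `torch.nn.Linear.weight` is
# (out_features, in_features). A minimal numeric check with illustrative
# shapes (uses the `onp`/`torch`/`nn` imports from the top of this script):
def _check_kernel_transpose():
    in_f, out_f = 4, 3
    kernel = onp.random.randn(in_f, out_f).astype(onp.float32)  # Flax layout
    x = onp.random.randn(2, in_f).astype(onp.float32)
    linear = nn.Linear(in_f, out_f, bias=False)
    linear.weight = nn.Parameter(torch.from_numpy(kernel.T))  # transpose once
    flax_out = x @ kernel  # Flax applies x @ kernel
    torch_out = linear(torch.from_numpy(x)).detach().numpy()
    assert onp.allclose(flax_out, torch_out, atol=1e-5)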
| 716 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
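
# Hedged usage sketch (requires `transformers`; treat the values as
# illustrative): the constructor above accepts a plain dict for
# `backbone_config` and rebuilds the nested config from its `model_type`;
# `to_dict` then re-serializes it.
from transformers import UperNetConfig

_config = UperNetConfig(backbone_config={"model_type": "resnet", "depths": [2, 2, 2, 2]})
assert _config.to_dict()["backbone_config"]["model_type"] == "resnet"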
| 658 | 0 |
from __future__ import annotations
_snake_case = "#"
class UpperCAmelCase_ :
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : dict = {}
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = self._trie
for char in text:
if char not in trie:
_lowerCAmelCase : List[Any] = {}
_lowerCAmelCase : str = trie[char]
_lowerCAmelCase : str = True
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self._trie
for char in prefix:
if char in trie:
_lowerCAmelCase : List[Any] = trie[char]
else:
return []
return self._elements(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for c, v in d.items():
            _lowerCAmelCase : List[Any] = [" "] if c == END else [(c + s) for s in self._elements(v)]
result.extend(__a)
return tuple(__a)
_snake_case = Trie()
_snake_case = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = trie.find_word(_lowerCamelCase )
return tuple(string + word for word in suffixes )
def A ( ):
'''simple docstring'''
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 717 |
import baseaa
def A ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaaencode(string.encode("utf-8" ) )
def A ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaadecode(_lowerCamelCase ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
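
# The helpers above appear to wrap Ascii85 coding from the stdlib (`baseaa`
# and `aaaencode`/`aaadecode` look like renamed `base64.a85encode` /
# `base64.a85decode` -- an inference, not confirmed by the source). A
# round-trip sketch with the real stdlib names:
import base64

_encoded = base64.a85encode("some text".encode("utf-8"))
assert base64.a85decode(_encoded).decode("utf-8") == "some text"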
| 658 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_snake_case = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for attribute in key.split("." ):
_lowerCAmelCase : int = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
_lowerCAmelCase : Dict = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
_lowerCAmelCase : str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
_lowerCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
_lowerCAmelCase : List[str] = value
elif weight_type == "weight_v":
_lowerCAmelCase : Dict = value
elif weight_type == "bias":
_lowerCAmelCase : int = value
elif weight_type == "running_mean":
_lowerCAmelCase : Optional[Any] = value
elif weight_type == "running_var":
_lowerCAmelCase : Tuple = value
elif weight_type == "num_batches_tracked":
_lowerCAmelCase : Tuple = value
elif weight_type == "inv_freq":
_lowerCAmelCase : List[Any] = value
else:
_lowerCAmelCase : Tuple = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
_lowerCAmelCase : Dict = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase : Tuple = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
_lowerCAmelCase : Tuple = True
else:
for key, mapped_key in MAPPING.items():
_lowerCAmelCase : Union[str, Any] = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_lowerCAmelCase : str = True
if "*" in mapped_key:
_lowerCAmelCase : Union[str, Any] = name.split(_lowerCamelCase )[0].split("." )[-2]
_lowerCAmelCase : Tuple = mapped_key.replace("*" , _lowerCamelCase )
if "pos_bias_u" in name:
_lowerCAmelCase : Optional[Any] = None
elif "pos_bias_v" in name:
_lowerCAmelCase : List[Any] = None
elif "weight_g" in name:
_lowerCAmelCase : List[str] = "weight_g"
elif "weight_v" in name:
_lowerCAmelCase : Union[str, Any] = "weight_v"
elif "bias" in name:
_lowerCAmelCase : int = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase : Any = "weight"
elif "running_mean" in name:
_lowerCAmelCase : List[str] = "running_mean"
elif "inv_freq" in name:
_lowerCAmelCase : Any = "inv_freq"
elif "running_var" in name:
_lowerCAmelCase : List[str] = "running_var"
elif "num_batches_tracked" in name:
_lowerCAmelCase : Optional[int] = "num_batches_tracked"
else:
_lowerCAmelCase : int = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = full_name.split("conv_layers." )[-1]
_lowerCAmelCase : Any = name.split("." )
_lowerCAmelCase : Optional[int] = int(items[0] )
_lowerCAmelCase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_lowerCAmelCase : Any = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_lowerCAmelCase : Any = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
_lowerCAmelCase : Dict = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
_lowerCAmelCase : int = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ):
'''simple docstring'''
if config_path is not None:
_lowerCAmelCase : int = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" )
else:
_lowerCAmelCase : Union[str, Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCAmelCase : List[Any] = "rotary"
if is_finetuned:
if dict_path:
_lowerCAmelCase : Dict = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase : Dict = target_dict.pad_index
_lowerCAmelCase : Optional[Any] = target_dict.bos_index
_lowerCAmelCase : List[Any] = target_dict.eos_index
_lowerCAmelCase : List[str] = len(target_dict.symbols )
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCAmelCase : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : str = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
_lowerCAmelCase : Dict = True if config.feat_extract_norm == "layer" else False
_lowerCAmelCase : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
_lowerCAmelCase : str = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCAmelCase : str = WavaVecaConformerForCTC(_lowerCamelCase )
else:
_lowerCAmelCase : Dict = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
_lowerCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_lowerCAmelCase : int = argparse.Namespace(task="audio_pretraining" )
_lowerCAmelCase : Dict = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCAmelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_snake_case = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
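
# The converter above resolves dotted parameter paths by looping `getattr`
# over `key.split(".")`; an equivalent, self-contained sketch of that pattern
# (the module built below is illustrative):
def _resolve_dotted_attribute():
    import functools

    model = torch.nn.Sequential(torch.nn.Linear(2, 2))
    # nn.Sequential registers its children under string indices, so "0.weight"
    # walks Sequential -> Linear -> weight.
    pointer = functools.reduce(getattr, "0.weight".split("."), model)
    assert pointer.shape == torch.Size([2, 2])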
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
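
# Hedged sketch of what the `inputs` property above encodes: the axis indices
# that must stay dynamic when exporting to ONNX. The export call in the
# trailing comment is illustrative, not the exact pipeline transformers runs.
_onnx_inputs = OrderedDict(
    [("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]
)
_dynamic_axes = dict(_onnx_inputs)
# e.g. torch.onnx.export(model, sample, "model.onnx",
#                        input_names=list(_onnx_inputs), dynamic_axes=_dynamic_axes)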
| 658 | 0 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_snake_case = numpy.array([0, 0])
_snake_case = numpy.array([0.5, 0.8660254])
_snake_case = numpy.array([1, 0])
_snake_case = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = initial_vectors
for _ in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = iteration_step(_lowerCamelCase )
return vectors
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for i, start_vector in enumerate(vectors[:-1] ):
_lowerCAmelCase : Optional[Any] = vectors[i + 1]
new_vectors.append(_lowerCamelCase )
_lowerCAmelCase : Tuple = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = numpy.radians(_lowerCamelCase )
    _lowerCAmelCase , _lowerCAmelCase : List[Any] = numpy.cos(_lowerCamelCase ), numpy.sin(_lowerCamelCase )
_lowerCAmelCase : str = numpy.array(((c, -s), (s, c)) )
return numpy.dot(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
    _lowerCAmelCase , _lowerCAmelCase : Dict = zip(*_lowerCamelCase )
plt.plot(_lowerCamelCase , _lowerCamelCase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
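
# A worked check of the rotation used above: rotating the unit x-vector by
# 60 degrees lands on (cos 60, sin 60) = (0.5, 0.8660254), i.e. VECTOR_2.
_theta = numpy.radians(60)
_rotation = numpy.array(
    ((numpy.cos(_theta), -numpy.sin(_theta)), (numpy.sin(_theta), numpy.cos(_theta)))
)
assert numpy.allclose(numpy.dot(_rotation, numpy.array([1, 0])), [0.5, 0.8660254])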
| 719 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = XLNetConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : Any = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : Any = finetuning_task
_lowerCAmelCase : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Union[str, Any] = XLNetForSequenceClassification(_lowerCamelCase )
elif "squad" in finetuning_task:
_lowerCAmelCase : Union[str, Any] = finetuning_task
_lowerCAmelCase : Any = XLNetForQuestionAnswering(_lowerCamelCase )
else:
_lowerCAmelCase : Union[str, Any] = XLNetLMHeadModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(F"Save PyTorch model to {os.path.abspath(_lowerCamelCase )}" )
torch.save(model.state_dict() , _lowerCamelCase )
print(F"Save configuration file to {os.path.abspath(_lowerCamelCase )}" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 658 | 0 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
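
# A minimal sketch of the modified n-gram precision at the heart of BLEU
# (unigrams only; the real metric combines orders 1..max_order and applies a
# brevity penalty). The helper name and data are illustrative.
def _unigram_precision(prediction, references):
    from collections import Counter

    pred_counts = Counter(prediction)
    max_ref_counts = Counter()
    for ref in references:
        for token, count in Counter(ref).items():
            max_ref_counts[token] = max(max_ref_counts[token], count)
    clipped = sum(min(n, max_ref_counts[tok]) for tok, n in pred_counts.items())
    return clipped / max(len(prediction), 1)


assert _unigram_precision(["the", "the", "cat"], [["the", "cat", "sat"]]) == 2 / 3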
| 658 | 0 |
import csv
import tweepy
# Twitter API credentials
_snake_case = ""
_snake_case = ""
_snake_case = ""
_snake_case = ""
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = tweepy.OAuthHandler(_lowerCamelCase , _lowerCamelCase )
auth.set_access_token(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Tuple = tweepy.API(_lowerCamelCase )
# initialize a list to hold all the tweepy Tweets
_lowerCAmelCase : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
_lowerCAmelCase : List[Any] = api.user_timeline(screen_name=_lowerCamelCase , count=200 )
# save most recent tweets
alltweets.extend(_lowerCamelCase )
# save the id of the oldest tweet less one
_lowerCAmelCase : Dict = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_lowerCamelCase ) > 0:
print(F"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
_lowerCAmelCase : List[str] = api.user_timeline(
screen_name=_lowerCamelCase , count=200 , max_id=_lowerCamelCase )
# save most recent tweets
alltweets.extend(_lowerCamelCase )
# update the id of the oldest tweet less one
_lowerCAmelCase : List[str] = alltweets[-1].id - 1
print(F"...{len(_lowerCamelCase )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
_lowerCAmelCase : int = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"new_{screen_name}_tweets.csv" , "w" ) as f:
_lowerCAmelCase : Union[str, Any] = csv.writer(_lowerCamelCase )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(_lowerCamelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 721 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = OmegaConf.load(_lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) )
return config
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if conf_path is None:
_lowerCAmelCase : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml"
_lowerCAmelCase : Tuple = load_config(_lowerCamelCase , display=_lowerCamelCase )
_lowerCAmelCase : str = VQModel(**config.model.params )
if ckpt_path is None:
_lowerCAmelCase : Optional[int] = "./model_checkpoints/vqgan_only.pt"
_lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
if ".ckpt" in ckpt_path:
_lowerCAmelCase : List[Any] = sd["state_dict"]
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
model.to(_lowerCamelCase )
del sd
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = model.encode(_lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
_lowerCAmelCase : int = model.decode(_lowerCamelCase )
return xrec
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = string.rsplit("." , 1 )
if reload:
_lowerCAmelCase : Dict = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def A ( _lowerCamelCase ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : str = instantiate_from_config(_lowerCamelCase )
if sd is not None:
model.load_state_dict(_lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if ckpt:
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : int = pl_sd["global_step"]
print(F"loaded model from global step {global_step}." )
else:
_lowerCAmelCase : Optional[int] = {"state_dict": None}
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"]
return model, global_step
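
# A self-contained sketch of the "instantiate from dotted string" pattern the
# helpers above implement (the target below is illustrative):
def _get_class(path):
    module_name, cls_name = path.rsplit(".", 1)
    return getattr(importlib.import_module(module_name), cls_name)


assert _get_class("collections.OrderedDict")(a=1)["a"] == 1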
| 658 | 0 |
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = hex_num.strip()
if not hex_num:
raise ValueError("No value was passed to the function" )
_lowerCAmelCase : Optional[Any] = hex_num[0] == "-"
if is_negative:
_lowerCAmelCase : List[str] = hex_num[1:]
try:
_lowerCAmelCase : Tuple = int(_lowerCamelCase , 16 )
except ValueError:
raise ValueError("Invalid value was passed to the function" )
_lowerCAmelCase : List[Any] = ""
while int_num > 0:
_lowerCAmelCase : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("-" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
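
# Quick cross-check of the conversion above against Python built-ins (the
# obfuscated definition is named `A`; `hex_to_bin` is the presumed intent):
assert int("AC", 16) == 172 and bin(172) == "0b10101100"
assert int("-0b101", 2) == -5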
| 700 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
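# --- usage sketch (not part of the original file) ---------------------------
# A PretrainedConfig subclass like this one is a plain hyperparameter
# container: construct it with defaults, override individual fields, and
# serialize it. Hypothetical use (assumes an installed `transformers` with
# RoCBert support; the override value is arbitrary):
#
#     from transformers import RoCBertConfig
#     cfg = RoCBertConfig(num_hidden_layers=6)
#     cfg.hidden_size        # 768, the default above
#     cfg.to_dict()          # JSON-serializable, round-trips via from_dict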
| 658 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'switch_transformers'
lowerCamelCase__ = ['past_key_values']
lowerCamelCase__ = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self, __a=3_2128, __a=768, __a=64, __a=2048, __a=64, __a=12, __a=3, __a=12, __a=3, __a=12, __a=8, __a=False, __a=0.01, __a="float32", __a=False, __a=32, __a=128, __a=0.1, __a=1E-6, __a=0.001, __a=0.001, __a=1.0, __a="relu", __a=True, __a=False, __a=True, __a=0, __a=1, **__a, ):
'''simple docstring'''
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : str = d_model
_lowerCAmelCase : Optional[Any] = d_kv
_lowerCAmelCase : Dict = d_ff
_lowerCAmelCase : List[str] = num_sparse_encoder_layers
_lowerCAmelCase : Tuple = num_layers
_lowerCAmelCase : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_lowerCAmelCase : List[Any] = num_sparse_decoder_layers
        # This tells us how often (i.e. every how many layers) a sparse encoder layer is inserted.
if self.num_sparse_encoder_layers > 0:
_lowerCAmelCase : Any = self.num_layers // self.num_sparse_encoder_layers
else:
_lowerCAmelCase : Optional[Any] = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us how often (i.e. every how many layers) a sparse decoder layer is inserted.
if self.num_sparse_decoder_layers > 0:
_lowerCAmelCase : Optional[Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
_lowerCAmelCase : List[str] = self.num_decoder_layers # HACK: this will create 0 sparse layers
_lowerCAmelCase : Any = num_heads
_lowerCAmelCase : int = num_experts
_lowerCAmelCase : str = expert_capacity
_lowerCAmelCase : int = router_bias
_lowerCAmelCase : Optional[int] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
_lowerCAmelCase : str = router_dtype
_lowerCAmelCase : Tuple = router_ignore_padding_tokens
_lowerCAmelCase : Dict = relative_attention_num_buckets
_lowerCAmelCase : List[Any] = relative_attention_max_distance
_lowerCAmelCase : Any = dropout_rate
_lowerCAmelCase : str = layer_norm_epsilon
_lowerCAmelCase : List[str] = initializer_factor
_lowerCAmelCase : Any = feed_forward_proj
_lowerCAmelCase : str = use_cache
_lowerCAmelCase : str = add_router_probs
_lowerCAmelCase : List[Any] = router_z_loss_coef
_lowerCAmelCase : Optional[int] = router_aux_loss_coef
_lowerCAmelCase : Optional[Any] = self.feed_forward_proj.split("-")
_lowerCAmelCase : Any = act_info[-1]
_lowerCAmelCase : List[str] = act_info[0] == "gated"
if len(__a) > 1 and act_info[0] != "gated" or len(__a) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'")
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_lowerCAmelCase : Optional[Any] = "gelu_new"
super().__init__(
pad_token_id=__a, eos_token_id=__a, is_encoder_decoder=__a, **__a, )
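# --- illustration (not part of the original file) ---------------------------
# The sparse-step arithmetic above: with 12 encoder layers and 3 sparse
# layers, every 4th block is a sparse (mixture-of-experts) block.
_demo_layers, _demo_sparse = 12, 3
_demo_step = _demo_layers // _demo_sparse if _demo_sparse > 0 else _demo_layers
# _demo_step == 4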
| 701 |
from __future__ import annotations
def A ( _lowerCamelCase ):
    '''
    Return the arithmetic mean of a list of numbers.

    >>> A([1, 2, 3, 4])
    2.5
    >>> A([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    '''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
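# --- illustration (not part of the original file) ---------------------------
# The stage naming above: one "stem" entry plus one stage per entry in
# `depths`; backbone consumers select feature maps by these names.
_demo_depths = [2, 2, 6, 2]
_demo_stages = ["stem"] + [f"stage{idx}" for idx in range(1, len(_demo_depths) + 1)]
# -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']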
| 702 |
def A ( _lowerCamelCase ):
    '''
    Return the first ``length`` hexagonal numbers, h(n) = n * (2 * n - 1).

    >>> A(5)
    [0, 1, 6, 15, 28]
    '''
    if length <= 0 or not isinstance(_lowerCamelCase , int ):
        raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
_lowerCAmelCase : str = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCAmelCase : Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
_lowerCAmelCase : Dict = np.concatenate(_lowerCamelCase , axis=0 )
        _lowerCAmelCase : Tuple = np.array(_lowerCamelCase ).astype(np.float32 ) / 255.0
_lowerCAmelCase : Optional[Any] = image.transpose(0 , 3 , 1 , 2 )
_lowerCAmelCase : Any = 2.0 * image - 1.0
_lowerCAmelCase : str = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
_lowerCAmelCase : Optional[Any] = torch.cat(_lowerCamelCase , dim=0 )
return image
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0.99_95 ):
'''simple docstring'''
if not isinstance(_lowerCamelCase , np.ndarray ):
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Optional[Any] = va.device
_lowerCAmelCase : Dict = va.cpu().numpy()
_lowerCAmelCase : str = va.cpu().numpy()
_lowerCAmelCase : Dict = np.sum(va * va / (np.linalg.norm(_lowerCamelCase ) * np.linalg.norm(_lowerCamelCase )) )
if np.abs(_lowerCamelCase ) > DOT_THRESHOLD:
_lowerCAmelCase : Dict = (1 - t) * va + t * va
else:
_lowerCAmelCase : str = np.arccos(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = np.sin(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = theta_a * t
_lowerCAmelCase : Tuple = np.sin(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCAmelCase : Optional[int] = sin_theta_t / sin_theta_a
_lowerCAmelCase : Optional[int] = sa * va + sa * va
if inputs_are_torch:
_lowerCAmelCase : List[Any] = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase )
return va
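# --- quick numerical check (not part of the original file) ------------------
# The slerp above interpolates along the great circle between two vectors.
# Same math, minus the torch plumbing, on a pair of orthogonal unit vectors:
def _slerp_demo(t, va, vb):
    dot = np.sum(va * vb / (np.linalg.norm(va) * np.linalg.norm(vb)))
    theta = np.arccos(dot)
    return (np.sin((1 - t) * theta) * va + np.sin(t * theta) * vb) / np.sin(theta)

# _slerp_demo(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
# -> array([0.7071..., 0.7071...]), the midpoint on the unit circle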
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F.normalize(_lowerCamelCase , dim=-1 )
_lowerCAmelCase : Tuple = F.normalize(_lowerCamelCase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for param in model.parameters():
_lowerCAmelCase : Any = value
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a, __a, __a, __a, __a, __a, __a=None, __a=None, __a=None, ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__a, text_encoder=__a, clip_model=__a, tokenizer=__a, unet=__a, scheduler=__a, feature_extractor=__a, coca_model=__a, coca_tokenizer=__a, coca_transform=__a, )
_lowerCAmelCase : Optional[Any] = (
feature_extractor.size
if isinstance(feature_extractor.size, __a)
else feature_extractor.size["shortest_edge"]
)
_lowerCAmelCase : List[Any] = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
set_requires_grad(self.text_encoder, __a)
set_requires_grad(self.clip_model, __a)
def snake_case__ ( self, __a = "auto"):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a)
def snake_case__ ( self):
'''simple docstring'''
self.enable_attention_slicing(__a)
def snake_case__ ( self):
'''simple docstring'''
set_requires_grad(self.vae, __a)
def snake_case__ ( self):
'''simple docstring'''
set_requires_grad(self.vae, __a)
def snake_case__ ( self):
'''simple docstring'''
set_requires_grad(self.unet, __a)
def snake_case__ ( self):
'''simple docstring'''
set_requires_grad(self.unet, __a)
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = min(int(num_inference_steps * strength), __a)
_lowerCAmelCase : List[str] = max(num_inference_steps - init_timestep, 0)
_lowerCAmelCase : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def snake_case__ ( self, __a, __a, __a, __a, __a, __a=None):
'''simple docstring'''
if not isinstance(__a, torch.Tensor):
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(__a)}")
_lowerCAmelCase : List[str] = image.to(device=__a, dtype=__a)
if isinstance(__a, __a):
_lowerCAmelCase : Any = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(__a)
]
_lowerCAmelCase : Optional[Any] = torch.cat(__a, dim=0)
else:
_lowerCAmelCase : Any = self.vae.encode(__a).latent_dist.sample(__a)
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase : List[str] = 0.18_215 * init_latents
_lowerCAmelCase : List[Any] = init_latents.repeat_interleave(__a, dim=0)
_lowerCAmelCase : Union[str, Any] = randn_tensor(init_latents.shape, generator=__a, device=__a, dtype=__a)
# get latents
_lowerCAmelCase : Union[str, Any] = self.scheduler.add_noise(__a, __a, __a)
_lowerCAmelCase : Optional[int] = init_latents
return latents
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : str = self.coca_transform(__a).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase : Optional[int] = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
_lowerCAmelCase : Tuple = self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.feature_extractor.preprocess(__a)
_lowerCAmelCase : Tuple = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
_lowerCAmelCase : int = self.clip_model.get_image_features(__a)
_lowerCAmelCase : str = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=__a)
_lowerCAmelCase : Union[str, Any] = image_embeddings_clip.repeat_interleave(__a, dim=0)
return image_embeddings_clip
@torch.enable_grad()
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = latents.detach().requires_grad_()
_lowerCAmelCase : Any = self.scheduler.scale_model_input(__a, __a)
# predict the noise residual
_lowerCAmelCase : Union[str, Any] = self.unet(__a, __a, encoder_hidden_states=__a).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
_lowerCAmelCase : int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase : str = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase : str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase : Optional[int] = torch.sqrt(__a)
_lowerCAmelCase : Union[str, Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, __a):
_lowerCAmelCase : List[Any] = self.scheduler.sigmas[index]
_lowerCAmelCase : int = latents - sigma * noise_pred
else:
raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase : Any = 1 / 0.18_215 * sample
_lowerCAmelCase : Optional[Any] = self.vae.decode(__a).sample
_lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0, 1)
_lowerCAmelCase : str = transforms.Resize(self.feature_extractor_size)(__a)
_lowerCAmelCase : Any = self.normalize(__a).to(latents.dtype)
_lowerCAmelCase : Tuple = self.clip_model.get_image_features(__a)
_lowerCAmelCase : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=__a)
_lowerCAmelCase : Any = spherical_dist_loss(__a, __a).mean() * clip_guidance_scale
_lowerCAmelCase : str = -torch.autograd.grad(__a, __a)[0]
if isinstance(self.scheduler, __a):
_lowerCAmelCase : Tuple = latents.detach() + grads * (sigma**2)
_lowerCAmelCase : List[Any] = noise_pred_original
else:
_lowerCAmelCase : Optional[int] = noise_pred_original - torch.sqrt(__a) * grads
return noise_pred, latents
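    # --- note (not part of the original file) --------------------------------
    # cond_fn above is classifier guidance with CLIP: reconstruct the current
    # x0 estimate, decode it through the VAE, embed the image with CLIP, score
    # it against the target embedding with the spherical distance, and shift
    # the noise prediction along the negative gradient so that the next
    # denoising step moves toward the CLIP target.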
@torch.no_grad()
def __call__( self, __a, __a, __a = None, __a = None, __a = 512, __a = 512, __a = 0.6, __a = 50, __a = 7.5, __a = 1, __a = 0.0, __a = 100, __a = None, __a = "pil", __a = True, __a = 0.8, __a = 0.1, __a = 0.1, ):
'''simple docstring'''
if isinstance(__a, __a) and len(__a) != batch_size:
raise ValueError(f"You have passed {batch_size} batch_size, but only {len(__a)} generators.")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if isinstance(__a, torch.Generator) and batch_size > 1:
_lowerCAmelCase : Dict = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase : Dict = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
_lowerCAmelCase : Optional[Any] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase : Optional[int] = ", ".join(__a)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__a):
raise ValueError(
f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
_lowerCAmelCase : List[str] = self.get_image_description(__a)
if style_prompt is None:
if len(__a):
raise ValueError(
f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
_lowerCAmelCase : List[str] = self.get_image_description(__a)
# get prompt text embeddings for content and style
_lowerCAmelCase : Optional[Any] = self.tokenizer(
__a, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=__a, return_tensors="pt", )
_lowerCAmelCase : List[Any] = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
_lowerCAmelCase : int = self.tokenizer(
__a, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=__a, return_tensors="pt", )
_lowerCAmelCase : List[str] = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
_lowerCAmelCase : str = slerp(__a, __a, __a)
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase : Union[str, Any] = text_embeddings.repeat_interleave(__a, dim=0)
# set timesteps
_lowerCAmelCase : Tuple = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
_lowerCAmelCase : List[str] = {}
if accepts_offset:
_lowerCAmelCase : Optional[int] = 1
self.scheduler.set_timesteps(__a, **__a)
        # Some schedulers like PNDM have timesteps as arrays.
        # It's more efficient to move all timesteps to the correct device beforehand.
self.scheduler.timesteps.to(self.device)
_lowerCAmelCase : Union[str, Any] = self.get_timesteps(__a, __a, self.device)
_lowerCAmelCase : Dict = timesteps[:1].repeat(__a)
# Preprocess image
_lowerCAmelCase : Optional[int] = preprocess(__a, __a, __a)
_lowerCAmelCase : Any = self.prepare_latents(
__a, __a, __a, text_embeddings.dtype, self.device, __a)
_lowerCAmelCase : List[Any] = preprocess(__a, __a, __a)
_lowerCAmelCase : str = self.prepare_latents(
__a, __a, __a, text_embeddings.dtype, self.device, __a)
_lowerCAmelCase : Tuple = slerp(__a, __a, __a)
if clip_guidance_scale > 0:
_lowerCAmelCase : List[str] = self.get_clip_image_embeddings(__a, __a)
_lowerCAmelCase : Optional[Any] = self.get_clip_image_embeddings(__a, __a)
_lowerCAmelCase : Optional[int] = slerp(
__a, __a, __a)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase : Union[str, Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase : str = content_text_input.input_ids.shape[-1]
_lowerCAmelCase : Union[str, Any] = self.tokenizer([""], padding="max_length", max_length=__a, return_tensors="pt")
_lowerCAmelCase : str = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase : Optional[int] = uncond_embeddings.repeat_interleave(__a, dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase : Optional[int] = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase : Union[str, Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase : List[str] = torch.randn(__a, generator=__a, device="cpu", dtype=__a).to(
self.device)
else:
_lowerCAmelCase : Optional[Any] = torch.randn(__a, generator=__a, device=self.device, dtype=__a)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
_lowerCAmelCase : Dict = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase : List[Any] = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
_lowerCAmelCase : Optional[int] = {}
if accepts_eta:
_lowerCAmelCase : List[Any] = eta
# check if the scheduler accepts generator
_lowerCAmelCase : Optional[int] = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
_lowerCAmelCase : Optional[int] = generator
with self.progress_bar(total=__a):
for i, t in enumerate(__a):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_lowerCAmelCase : List[str] = self.scheduler.scale_model_input(__a, __a)
# predict the noise residual
_lowerCAmelCase : List[Any] = self.unet(__a, __a, encoder_hidden_states=__a).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase : List[Any] = noise_pred.chunk(2)
_lowerCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase : str = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase : str = self.cond_fn(
__a, __a, __a, __a, __a, __a, __a, )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(__a, __a, __a, **__a).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase : Tuple = 1 / 0.18_215 * latents
_lowerCAmelCase : Optional[Any] = self.vae.decode(__a).sample
_lowerCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0, 1)
_lowerCAmelCase : List[Any] = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowerCAmelCase : str = self.numpy_to_pil(__a)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__a, nsfw_content_detected=__a)
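# --- usage sketch (not part of the original file) ---------------------------
# The __call__ above mixes a content image and a style image: both are
# encoded to latents, noised to an intermediate timestep, slerped together,
# and then denoised under classifier-free guidance plus optional CLIP
# guidance. A hypothetical invocation (every argument name and value below
# is illustrative only, not taken from a tested configuration):
#
#     out = pipe(
#         content_image, style_image,
#         strength=0.6, num_inference_steps=50,
#         guidance_scale=7.5, clip_guidance_scale=100,
#     )
#     out.images[0].save("mixed.png")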
| 703 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( _lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
# Centralize the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if features.any():
_lowerCAmelCase : List[Any] = features.mean(1 )
# Center the dataset
_lowerCAmelCase : List[Any] = features - np.reshape(_lowerCamelCase , (data_mean.size, 1) )
_lowerCAmelCase : Optional[Any] = np.dot(_lowerCamelCase , centered_data.T ) / features.shape[1]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = np.linalg.eigh(_lowerCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCAmelCase : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCAmelCase : List[Any] = np.dot(filtered_eigenvectors.T , _lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert classes > dimensions
    # Check if the features have already been loaded
    if features.any():
_lowerCAmelCase , _lowerCAmelCase : List[str] = eigh(
covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
_lowerCAmelCase : List[str] = eigenvectors[:, ::-1][:, :dimensions]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = np.linalg.svd(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = svd_matrix[:, 0:dimensions]
_lowerCAmelCase : str = np.dot(filtered_svd_matrix.T , _lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
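# --- quick numeric check (not part of the original file) --------------------
# PCA intuition for the helper above: perfectly correlated 2-D points have a
# rank-1 covariance, so one eigenvalue is ~0 and a single component keeps all
# of the variance.
def _pca_rank_demo():
    points = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]])  # features x samples
    centered = points - points.mean(1, keepdims=True)
    cov = np.dot(centered, centered.T) / points.shape[1]
    return np.linalg.eigh(cov)[0]  # eigenvalues, ascending: the first is ~0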
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 2
    # Assert that the function raises an AssertionError when dimensions >= classes
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Union[str, Any] = linear_discriminant_analysis(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Tuple = principal_component_analysis(_lowerCamelCase , _lowerCamelCase )
if not np.allclose(_lowerCamelCase , _lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
import requests
from bs4 import BeautifulSoup
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , "html.parser" )
_lowerCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_lowerCAmelCase : str = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_snake_case = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Tuple = counter
if counter > pre_counter:
_lowerCAmelCase : Union[str, Any] = inputa
_lowerCAmelCase : Union[str, Any] = counter
return largest_number
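# --- quick check (not part of the original file) -----------------------------
# Chain length for a single start value, under the same Collatz rules as
# above; the memo dict in the solver simply caches these lengths across
# start values.
def _collatz_len(n: int) -> int:
    steps = 1  # count the start value itself
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        steps += 1
    return steps

# _collatz_len(27) == 112, the classic long chain among small start values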
if __name__ == "__main__":
print(solution(int(input().strip())))
| 658 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = BertTokenizer
lowerCamelCase__ = BertTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = filter_non_english
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = "UNwant\u00E9d,running"
_lowerCAmelCase : Dict = "unwanted, running"
return input_text, output_text
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file)
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(__a, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a), [9, 6, 7, 12, 10, 11])
def snake_case__ ( self):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = self.get_rust_tokenizer()
_lowerCAmelCase : Optional[int] = "UNwant\u00E9d,running"
_lowerCAmelCase : List[str] = tokenizer.tokenize(__a)
_lowerCAmelCase : Dict = rust_tokenizer.tokenize(__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : List[str] = tokenizer.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : str = rust_tokenizer.encode(__a, add_special_tokens=__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Optional[int] = tokenizer.encode(__a)
_lowerCAmelCase : Any = rust_tokenizer.encode(__a)
self.assertListEqual(__a, __a)
# With lower casing
_lowerCAmelCase : Optional[Any] = self.get_tokenizer(do_lower_case=__a)
_lowerCAmelCase : Dict = self.get_rust_tokenizer(do_lower_case=__a)
_lowerCAmelCase : Union[str, Any] = "UNwant\u00E9d,running"
_lowerCAmelCase : Optional[Any] = tokenizer.tokenize(__a)
_lowerCAmelCase : List[str] = rust_tokenizer.tokenize(__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : List[str] = tokenizer.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : List[str] = rust_tokenizer.encode(__a, add_special_tokens=__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : List[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : List[Any] = tokenizer.encode(__a)
_lowerCAmelCase : Dict = rust_tokenizer.encode(__a)
self.assertListEqual(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = BasicTokenizer(do_lower_case=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=__a, strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = BasicTokenizer(do_lower_case=__a, strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = BasicTokenizer(do_lower_case=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = BasicTokenizer(do_lower_case=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = BasicTokenizer(do_lower_case=__a, strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = BasicTokenizer(do_lower_case=__a, strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = BasicTokenizer(do_lower_case=__a, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = BasicTokenizer()
_lowerCAmelCase : Optional[int] = "a\n'll !!to?'d of, can't."
_lowerCAmelCase : int = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(__a), __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_lowerCAmelCase : int = {}
for i, token in enumerate(__a):
_lowerCAmelCase : Dict = i
_lowerCAmelCase : Tuple = WordpieceTokenizer(vocab=__a, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.get_tokenizer()
_lowerCAmelCase : List[str] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__a) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
self.assertListEqual(
[rust_tokenizer.tokenize(__a) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained("bert-base-uncased")
_lowerCAmelCase : str = tokenizer.encode("sequence builders", add_special_tokens=__a)
_lowerCAmelCase : List[str] = tokenizer.encode("multi-sequence build", add_special_tokens=__a)
_lowerCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__a)
_lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(__a, __a)
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def snake_case__ ( self):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
_lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(__a, **__a)
_lowerCAmelCase : str = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_lowerCAmelCase : Any = tokenizer_r.encode_plus(
__a, return_attention_mask=__a, return_token_type_ids=__a, return_offsets_mapping=__a, add_special_tokens=__a, )
_lowerCAmelCase : Optional[int] = tokenizer_r.do_lower_case if hasattr(__a, "do_lower_case") else False
_lowerCAmelCase : str = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ["的", "人", "有"]
_lowerCAmelCase : Optional[int] = "".join(__a)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained(__a, **__a)
_lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(__a, **__a)
_lowerCAmelCase : List[Any] = tokenizer_p.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : List[Any] = tokenizer_r.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(__a)
_lowerCAmelCase : int = tokenizer_p.convert_ids_to_tokens(__a)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a, __a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a, **__a)
_lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(__a, **__a)
_lowerCAmelCase : Optional[int] = tokenizer_r.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : Any = tokenizer_p.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : str = tokenizer_r.convert_ids_to_tokens(__a)
_lowerCAmelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a)
# it is expected that only the first Chinese character is not preceded by "##".
_lowerCAmelCase : Any = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(__a)
]
self.assertListEqual(__a, __a)
self.assertListEqual(__a, __a)
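# --- usage sketch (not part of the original file) ----------------------------
# return_offsets_mapping (fast tokenizers only) maps each token back to its
# character span in the input, which is what the offsets test above checks.
# Hypothetical minimal use:
#
#     enc = BertTokenizerFast.from_pretrained("bert-base-uncased")(
#         "hello world", return_offsets_mapping=True)
#     enc["offset_mapping"]  # [(0, 0), (0, 5), (6, 11), (0, 0)] incl. specials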
| 706 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
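# --- worked example (not part of the original file) ---------------------------
# One pass through the encoder conv_in rename above: for the checkpoint key
#     "encoders.0.level_blocks.0.model.1.0.weight"
# the regex captures (0, 0, 1, 0, "weight"), block_index = 1 * 2 + 0 = 2, and
# the key becomes
#     "encoders.0.level_blocks.0.downsample_block.2.weight"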
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
_snake_case = ["bert-base-uncased", "bert-base-cased"]
_snake_case = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
class UpperCAmelCase_ ( tf.keras.Model):
def __init__( self, __a):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Dict = tokenizer
_lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(__a)
_lowerCAmelCase : Any = TFAutoModel.from_config(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(__a)
_lowerCAmelCase : Tuple = self.bert(**__a)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : Optional[int] = [
BertTokenizer.from_pretrained(__a) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_lowerCAmelCase : Any = [TFBertTokenizer.from_pretrained(__a) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__a, use_fast_bert_tokenizer=__a)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_lowerCAmelCase : Optional[Any] = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
_lowerCAmelCase : Any = list(zip(self.test_sentences, self.test_sentences[::-1]))
def snake_case__ ( self):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCAmelCase : str = tokenizer(__a, return_tensors="tf", padding="longest")
_lowerCAmelCase : str = tf_tokenizer(__a)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Optional[Any] = tf_tokenizer(self.paired_sentences)
_lowerCAmelCase : Union[str, Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences], )
for key in merged_outputs.keys():
            self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Optional[int] = tf.function(__a)
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCAmelCase : Optional[Any] = tf.constant(__a)
_lowerCAmelCase : str = compiled_tokenizer(__a)
_lowerCAmelCase : str = tf_tokenizer(__a)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : str = ModelToSave(tokenizer=__a)
_lowerCAmelCase : Dict = tf.convert_to_tensor(self.test_sentences)
_lowerCAmelCase : str = model(__a) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCAmelCase : Union[str, Any] = Path(__a) / "saved.model"
model.save(__a)
_lowerCAmelCase : Dict = tf.keras.models.load_model(__a)
_lowerCAmelCase : int = loaded_model(__a)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1E-5)
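# Hedged sketch (not part of the test suite) of why in-graph tokenization is
# useful: the tokenizer is serialized inside the SavedModel, so serving needs
# no separate Python preprocessing step. The checkpoint string and save path
# are illustrative; ModelToSave is the Keras wrapper defined above.
def _export_sketch(path="/tmp/bert_with_tokenizer"):
    tokenizer = TFBertTokenizer.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
    model = ModelToSave(tokenizer=tokenizer)
    model(tf.constant(["hello world"]))  # build the model once
    model.save(path)  # the tokenizer ships inside the saved graph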
| 707 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
_lowerCAmelCase : str = ksize + 1
    _lowerCAmelCase : List[str] = np.zeros((ksize, ksize) , dtype=np.float64 )
    # fill in each value of the kernel
for y in range(_lowerCamelCase ):
for x in range(_lowerCamelCase ):
# distance from center
_lowerCAmelCase : int = x - ksize // 2
_lowerCAmelCase : Dict = y - ksize // 2
            # degrees to radians
_lowerCAmelCase : List[Any] = theta / 180 * np.pi
_lowerCAmelCase : int = np.cos(_theta )
_lowerCAmelCase : Optional[int] = np.sin(_theta )
# get kernel x
_lowerCAmelCase : int = cos_theta * px + sin_theta * py
# get kernel y
_lowerCAmelCase : str = -sin_theta * px + cos_theta * py
# fill kernel
_lowerCAmelCase : Union[str, Any] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread("../image_data/lena.jpg")
    # convert the image to gray scale
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
_snake_case = out / out.max() * 255
    _snake_case = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
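# Self-contained sanity check of the Gabor formula used above: at the kernel
# centre (px = py = 0) the Gaussian term is 1, so the value reduces to
# cos(psi). Parameter order mirrors the kernel builder: sigma, theta, lambd,
# gamma, psi.
def _gabor_value(px, py, sigma, theta, lambd, gamma, psi):
    _theta = theta / 180 * np.pi
    _x = np.cos(_theta) * px + np.sin(_theta) * py  # rotate into the filter frame
    _y = -np.sin(_theta) * px + np.cos(_theta) * py
    return np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

assert abs(_gabor_value(0, 0, 8, 45, 10, 0.5, 0) - 1.0) < 1e-12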
| 658 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
    def to_dict ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
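# Self-contained sketch (independent of the class above) of the composed-config
# pattern it implements: the parent config holds a backbone config object and
# inlines it as a plain dict when serializing, as the to_dict method above does.
class _ChildConfig:
    model_type = "resnet"
    def to_dict(self):
        return {"model_type": self.model_type}

class _ParentConfig:
    model_type = "upernet"
    def __init__(self):
        self.backbone_config = _ChildConfig()
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)  # reuses the copy import above
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

assert _ParentConfig().to_dict() == {"backbone_config": {"model_type": "resnet"}, "model_type": "upernet"}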
| 708 |
def binary_insertion_sort ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
for i in range(1 , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = collection[i]
_lowerCAmelCase : str = 0
_lowerCAmelCase : Union[str, Any] = i - 1
while low <= high:
_lowerCAmelCase : List[str] = (low + high) // 2
if val < collection[mid]:
_lowerCAmelCase : Optional[int] = mid - 1
else:
_lowerCAmelCase : List[str] = mid + 1
for j in range(_lowerCamelCase , _lowerCamelCase , -1 ):
_lowerCAmelCase : int = collection[j - 1]
_lowerCAmelCase : Optional[int] = val
return collection
if __name__ == "__main__":
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
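# Self-contained sketch of the core step above: binary search for the
# insertion point inside the already-sorted prefix. It is equivalent to
# bisect.bisect_right, which is what keeps the sort stable.
import bisect

def _insertion_point(sorted_prefix, val):
    low, high = 0, len(sorted_prefix) - 1
    while low <= high:
        mid = (low + high) // 2
        if val < sorted_prefix[mid]:
            high = mid - 1
        else:
            low = mid + 1
    return low

assert _insertion_point([2, 4, 6], 5) == 2
assert _insertion_point([2, 4, 6], 4) == bisect.bisect_right([2, 4, 6], 4)  # equal keys go right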
| 658 | 0 |
import requests
from bs4 import BeautifulSoup
def get_citation ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , "html.parser" )
_lowerCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_lowerCAmelCase : str = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
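# Self-contained check of the stage naming above: "stem" plus one entry per
# depth; get_aligned_output_features_output_indices then aligns the requested
# out_features against these names.
_depths = [2, 2, 6, 2]
assert ["stem"] + [f"stage{idx}" for idx in range(1, len(_depths) + 1)] == [
    "stem", "stage1", "stage2", "stage3", "stage4"
]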
| 658 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_snake_case = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_snake_case = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_snake_case = R"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("string"),
"references": datasets.Value("string"),
}), homepage="https://github.com/hendrycks/math", codebase_urls=["https://github.com/hendrycks/math"], )
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = 0.0
for i, j in zip(__a, __a):
n_correct += 1.0 if math_equivalence.is_equiv(__a, __a) else 0.0
_lowerCAmelCase : Dict = n_correct / len(__a)
return {
"accuracy": accuracy,
}
| 710 |
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def count_of_possible_combinations(_lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_lowerCamelCase , _lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase : Optional[int] = sum(
count_of_possible_combinations_with_dp_array(target - item , _lowerCamelCase )
for item in array )
_lowerCAmelCase : Any = answer
return answer
_lowerCAmelCase : List[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_lowerCamelCase , _lowerCamelCase )
def combination_sum_iv ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [0] * (target + 1)
_lowerCAmelCase : List[str] = 1
for i in range(1 , target + 1 ):
for j in range(_lowerCamelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
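# Self-contained cross-check of the bottom-up recurrence above:
# dp[t] = sum(dp[t - a] for a in array if t - a >= 0), with dp[0] = 1.
# For array = [1, 2, 5] and target = 5 there are 9 ordered ways
# (order matters: 1+1+1+2 and 2+1+1+1 count separately).
def _count_ways(array, target):
    dp = [1] + [0] * target
    for t in range(1, target + 1):
        dp[t] = sum(dp[t - a] for a in array if t - a >= 0)
    return dp[target]

assert _count_ways([1, 2, 5], 5) == 9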
| 658 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
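# Self-contained sketch of the lazy-import pattern above: attribute access
# triggers the real import the first time it is needed, so optional heavy
# dependencies are only loaded on use. _LazyModule itself is more elaborate.
import importlib

class _Lazy:
    def __init__(self, name):
        self._name, self._mod = name, None
    def __getattr__(self, attr):
        if self._mod is None:
            self._mod = importlib.import_module(self._name)  # deferred import
        return getattr(self._mod, attr)

json_mod = _Lazy("json")
assert json_mod.loads("[1, 2]") == [1, 2]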
| 711 |
import string
def decrypt ( _lowerCamelCase ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
_lowerCAmelCase : str = ""
for symbol in message:
if symbol in string.ascii_uppercase:
_lowerCAmelCase : List[str] = string.ascii_uppercase.find(_lowerCamelCase )
_lowerCAmelCase : Dict = num - key
if num < 0:
_lowerCAmelCase : Dict = num + len(string.ascii_uppercase )
_lowerCAmelCase : Optional[Any] = translated + string.ascii_uppercase[num]
else:
_lowerCAmelCase : int = translated + symbol
print(F"Decryption using Key #{key}: {translated}" )
def main ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = input("Encrypted message: " )
_lowerCAmelCase : Dict = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
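# Self-contained check of the shift arithmetic above (reusing the string
# import at the top of the file): decrypting with the encryption key recovers
# the plaintext, e.g. "HELLO" shifted by 7 is "OLSSV".
def _shift_back(message, key):
    return "".join(
        string.ascii_uppercase[(string.ascii_uppercase.index(c) - key) % 26]
        if c in string.ascii_uppercase
        else c
        for c in message
    )

assert _shift_back("OLSSV", 7) == "HELLO"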
| 658 | 0 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = int(number**0.5 )
return number == sq * sq
def add_three ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_lowerCAmelCase : int = x_den * y_den * z_den
_lowerCAmelCase : int = gcd(_lowerCamelCase , _lowerCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
def solution ( _lowerCamelCase = 35 ):
'''simple docstring'''
_lowerCAmelCase : set = set()
_lowerCAmelCase : int
_lowerCAmelCase : Fraction = Fraction(0 )
_lowerCAmelCase : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_lowerCAmelCase : int = x_num * y_den + x_den * y_num
_lowerCAmelCase : List[str] = x_den * y_den
_lowerCAmelCase : str = gcd(_lowerCamelCase , _lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowerCAmelCase : Union[str, Any] = add_three(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
unique_s.add(_lowerCamelCase )
# n=2
_lowerCAmelCase : List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_lowerCAmelCase : Tuple = x_den * x_den * y_den * y_den
if is_sq(_lowerCamelCase ) and is_sq(_lowerCamelCase ):
_lowerCAmelCase : str = int(sqrt(_lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = int(sqrt(_lowerCamelCase ) )
_lowerCAmelCase : Any = gcd(_lowerCamelCase , _lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowerCAmelCase : Optional[int] = add_three(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
unique_s.add(_lowerCamelCase )
# n=-1
_lowerCAmelCase : int = x_num * y_num
_lowerCAmelCase : Dict = x_den * y_num + x_num * y_den
_lowerCAmelCase : Union[str, Any] = gcd(_lowerCamelCase , _lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowerCAmelCase : Dict = add_three(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
unique_s.add(_lowerCamelCase )
                    # n=-2
_lowerCAmelCase : List[str] = x_num * x_num * y_num * y_num
_lowerCAmelCase : List[Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_lowerCamelCase ) and is_sq(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = int(sqrt(_lowerCamelCase ) )
_lowerCAmelCase : List[Any] = int(sqrt(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = gcd(_lowerCamelCase , _lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_lowerCAmelCase : Optional[int] = add_three(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
unique_s.add(_lowerCamelCase )
for num, den in unique_s:
total += Fraction(_lowerCamelCase , _lowerCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
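# Self-contained check of the reduced-fraction addition performed by add_three
# above (parameter order x_num, x_den, y_num, y_den, z_num, z_den, as implied
# by its body): 1/2 + 1/3 + 1/6 = 1/1 in lowest terms.
def _add_three(x_num, x_den, y_num, y_den, z_num, z_den):
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    common = gcd(top, bottom)  # reuses the math import at the top of the file
    return top // common, bottom // common

assert _add_three(1, 2, 1, 3, 1, 6) == (1, 1)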
| 712 |
import requests
from bs4 import BeautifulSoup
def A ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
_lowerCAmelCase : str = soup.findAll("h1" )
_lowerCAmelCase : Optional[int] = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = args.pruning_method
_lowerCAmelCase : Dict = args.threshold
_lowerCAmelCase : Any = args.model_name_or_path.rstrip("/" )
_lowerCAmelCase : str = args.target_model_path
print(F"Load fine-pruned model from {model_name_or_path}" )
_lowerCAmelCase : Any = torch.load(os.path.join(_lowerCamelCase , "pytorch_model.bin" ) )
_lowerCAmelCase : Tuple = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_lowerCAmelCase : Optional[Any] = tensor
print(F"Copied layer {name}" )
elif "classifier" in name or "qa_output" in name:
_lowerCAmelCase : Dict = tensor
print(F"Copied layer {name}" )
elif "bias" in name:
_lowerCAmelCase : Any = tensor
print(F"Copied layer {name}" )
else:
if pruning_method == "magnitude":
_lowerCAmelCase : str = MagnitudeBinarizer.apply(inputs=_lowerCamelCase , threshold=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_lowerCAmelCase : Optional[Any] = name[:-6]
_lowerCAmelCase : List[Any] = model[F"{prefix_}mask_scores"]
_lowerCAmelCase : int = TopKBinarizer.apply(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_lowerCAmelCase : int = name[:-6]
_lowerCAmelCase : Optional[Any] = model[F"{prefix_}mask_scores"]
_lowerCAmelCase : str = ThresholdBinarizer.apply(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Tuple = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_lowerCAmelCase : int = name[:-6]
_lowerCAmelCase : Optional[int] = model[F"{prefix_}mask_scores"]
_lowerCAmelCase : List[Any] = -0.1, 1.1
_lowerCAmelCase : Union[str, Any] = torch.sigmoid(_lowerCamelCase )
_lowerCAmelCase : Tuple = s * (r - l) + l
_lowerCAmelCase : Any = s_bar.clamp(min=0.0 , max=1.0 )
_lowerCAmelCase : Any = tensor * mask
print(F"Pruned layer {name}" )
else:
raise ValueError("Unknown pruning method" )
if target_model_path is None:
_lowerCAmelCase : Optional[Any] = os.path.join(
os.path.dirname(_lowerCamelCase ) , F"bertarized_{os.path.basename(_lowerCamelCase )}" )
if not os.path.isdir(_lowerCamelCase ):
shutil.copytree(_lowerCamelCase , _lowerCamelCase )
print(F"\nCreated folder {target_model_path}" )
torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , "pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
_snake_case = parser.parse_args()
main(args)
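# Self-contained sketch of the "magnitude" branch above, using plain torch in
# place of the emmental MagnitudeBinarizer (an approximation, not the real
# implementation): keep the fraction `level` of entries with the largest
# magnitude and zero out the rest.
def _magnitude_prune(tensor, level):
    flat = tensor.abs().flatten()
    k = max(1, int(level * flat.numel()))
    cutoff = flat.topk(k).values.min()  # k-th largest magnitude
    mask = (tensor.abs() >= cutoff).to(tensor.dtype)
    return tensor * mask

assert int((_magnitude_prune(torch.tensor([0.1, -3.0, 0.2, 5.0, -0.4]), 0.4) != 0).sum()) == 2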
| 713 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial :
def __init__( self, __a, __a):
'''simple docstring'''
if len(__a) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1.")
_lowerCAmelCase : list[float] = list(__a)
_lowerCAmelCase : Any = degree
def __add__( self, __a):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_lowerCAmelCase : Dict = self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree, __a)
else:
_lowerCAmelCase : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree, __a)
def __sub__( self, __a):
'''simple docstring'''
return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self):
'''simple docstring'''
return Polynomial(self.degree, [-c for c in self.coefficients])
def __mul__( self, __a):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(__a)
return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * self.degree
for i in range(self.degree):
_lowerCAmelCase : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, __a)
def snake_case__ ( self, __a = 0):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + 2)
_lowerCAmelCase : Optional[Any] = constant
for i in range(self.degree + 1):
_lowerCAmelCase : Dict = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, __a)
def __eq__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self, __a):
'''simple docstring'''
return not self.__eq__(__a)
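# Self-contained check of the coefficient convolution performed by __mul__
# above (coefficients ordered lowest degree first, matching the constructor):
def _poly_mul(a, b):
    out = [0.0] * (len(a) + len(b) - 1)
    for i, ai in enumerate(a):
        for j, bj in enumerate(b):
            out[i + j] += ai * bj
    return out

assert _poly_mul([1, 2], [3, 1]) == [3, 7, 2]  # (1 + 2x)(3 + x) = 3 + 7x + 2x^2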
| 658 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=3_2000, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=True, __a="bi", __a=0.02, __a=1E-12, __a=0.1, __a=512, __a=None, __a=True, __a=False, __a=False, __a=-1, __a=False, __a="last", __a=True, __a="tanh", __a=0.1, __a=5, __a=5, __a=5, __a=1, __a=2, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Tuple = n_layer
_lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
_lowerCAmelCase : Optional[int] = d_model // n_head
_lowerCAmelCase : List[str] = ff_activation
_lowerCAmelCase : Tuple = d_inner
_lowerCAmelCase : List[Any] = untie_r
_lowerCAmelCase : List[str] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = dropout
_lowerCAmelCase : Optional[int] = mem_len
_lowerCAmelCase : Union[str, Any] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : List[str] = clamp_len
_lowerCAmelCase : Any = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : Optional[Any] = summary_activation
_lowerCAmelCase : Tuple = summary_last_dropout
_lowerCAmelCase : Union[str, Any] = start_n_top
_lowerCAmelCase : Optional[int] = end_n_top
_lowerCAmelCase : Tuple = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
    def max_position_embeddings ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
    def max_position_embeddings ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 0 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
_snake_case = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
_snake_case = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def get_pairs ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = set()
_lowerCAmelCase : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Optional[int] = char
_lowerCAmelCase : Dict = set(_lowerCamelCase )
return pairs
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self, __a, __a, __a="<s>", __a="</s>", __a="</s>", __a="<s>", __a="<unk>", __a="<pad>", __a="<mask>", **__a, ):
'''simple docstring'''
super().__init__(
bos_token=__a, eos_token=__a, unk_token=__a, sep_token=__a, cls_token=__a, pad_token=__a, mask_token=__a, **__a, )
_lowerCAmelCase : str = vocab_file
_lowerCAmelCase : Dict = merges_file
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Dict = 2
_lowerCAmelCase : Optional[int] = 3
self.add_from_file(__a)
_lowerCAmelCase : List[Any] = {v: k for k, v in self.encoder.items()}
with open(__a, encoding="utf-8") as merges_handle:
_lowerCAmelCase : Union[str, Any] = merges_handle.read().split("\n")[:-1]
_lowerCAmelCase : Any = [tuple(merge.split()[:-1]) for merge in merges]
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : Optional[Any] = {}
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
_lowerCAmelCase : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case__ ( self, __a, __a = None, __a = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a, token_ids_a=__a, already_has_special_tokens=__a)
if token_ids_a is None:
return [1] + ([0] * len(__a)) + [1]
return [1] + ([0] * len(__a)) + [1, 1] + ([0] * len(__a)) + [1]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def snake_case__ ( self):
'''simple docstring'''
return len(self.encoder)
def snake_case__ ( self):
'''simple docstring'''
return dict(self.encoder, **self.added_tokens_encoder)
def snake_case__ ( self, __a):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : List[Any] = tuple(__a)
_lowerCAmelCase : Optional[Any] = tuple(list(word[:-1]) + [word[-1] + "</w>"])
_lowerCAmelCase : List[str] = get_pairs(__a)
if not pairs:
return token
while True:
_lowerCAmelCase : Optional[Any] = min(__a, key=lambda __a: self.bpe_ranks.get(__a, float("inf")))
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase : Tuple = bigram
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : List[Any] = 0
while i < len(__a):
try:
_lowerCAmelCase : Tuple = word.index(__a, __a)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_lowerCAmelCase : List[str] = j
if word[i] == first and i < len(__a) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_lowerCAmelCase : int = tuple(__a)
_lowerCAmelCase : Dict = new_word
if len(__a) == 1:
break
else:
_lowerCAmelCase : Optional[int] = get_pairs(__a)
_lowerCAmelCase : str = "@@ ".join(__a)
_lowerCAmelCase : Union[str, Any] = word[:-4]
_lowerCAmelCase : int = word
return word
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[str] = re.findall(R"\S+\n?", __a)
for token in words:
split_tokens.extend(list(self.bpe(__a).split(" ")))
return split_tokens
def snake_case__ ( self, __a):
'''simple docstring'''
return self.encoder.get(__a, self.encoder.get(self.unk_token))
def snake_case__ ( self, __a):
'''simple docstring'''
return self.decoder.get(__a, self.unk_token)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = " ".join(__a).replace("@@ ", "").strip()
return out_string
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if not os.path.isdir(__a):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
_lowerCAmelCase : Dict = os.path.join(
__a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Any = os.path.join(
__a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__a):
copyfile(self.vocab_file, __a)
if os.path.abspath(self.merges_file) != os.path.abspath(__a):
copyfile(self.merges_file, __a)
return out_vocab_file, out_merge_file
def snake_case__ ( self, __a):
'''simple docstring'''
if isinstance(__a, __a):
try:
with open(__a, "r", encoding="utf-8") as fd:
self.add_from_file(__a)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
return
_lowerCAmelCase : Any = f.readlines()
for lineTmp in lines:
_lowerCAmelCase : Any = lineTmp.strip()
_lowerCAmelCase : str = line.rfind(" ")
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
_lowerCAmelCase : List[Any] = line[:idx]
_lowerCAmelCase : List[str] = len(self.encoder)
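# Self-contained sketch of the BPE primitives above: get_pairs collects
# adjacent symbol pairs, and one merge step fuses every occurrence of the
# highest-ranked pair, exactly as the bpe method's inner loop does.
def _get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

def _merge_pair(word, pair):
    out, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == pair:
            out.append(word[i] + word[i + 1])
            i += 2
        else:
            out.append(word[i])
            i += 1
    return tuple(out)

assert _get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
assert _merge_pair(("h", "e", "l", "l", "o"), ("l", "l")) == ("h", "e", "ll", "o")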
| 715 |
def price_plus_tax ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = OmegaConf.load(_lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) )
return config
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if conf_path is None:
_lowerCAmelCase : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml"
_lowerCAmelCase : Tuple = load_config(_lowerCamelCase , display=_lowerCamelCase )
_lowerCAmelCase : str = VQModel(**config.model.params )
if ckpt_path is None:
_lowerCAmelCase : Optional[int] = "./model_checkpoints/vqgan_only.pt"
_lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
if ".ckpt" in ckpt_path:
_lowerCAmelCase : List[Any] = sd["state_dict"]
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
model.to(_lowerCamelCase )
del sd
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = model.encode(_lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
_lowerCAmelCase : int = model.decode(_lowerCamelCase )
return xrec
def get_obj_from_str ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : List[str] = string.rsplit("." , 1 )
if reload:
_lowerCAmelCase : Dict = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def instantiate_from_config ( _lowerCamelCase ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def load_model_from_config ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : str = instantiate_from_config(_lowerCamelCase )
if sd is not None:
model.load_state_dict(_lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if ckpt:
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : int = pl_sd["global_step"]
print(F"loaded model from global step {global_step}." )
else:
_lowerCAmelCase : Optional[int] = {"state_dict": None}
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"]
return model, global_step
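# Self-contained check of the dynamic-import helper above: split a dotted path
# into module and attribute, import the module (importlib is already imported
# at the top of the file), and fetch the attribute.
def _get_obj_from_str(path):
    module, cls = path.rsplit(".", 1)
    return getattr(importlib.import_module(module), cls)

assert _get_obj_from_str("collections.OrderedDict").__name__ == "OrderedDict"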
| 716 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 658 | 0 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = int(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = t // 3_600, (t // 60) % 60, t % 60
return F"{h}:{m:02d}:{s:02d}" if h != 0 else F"{m:02d}:{s:02d}"
def html_progress_bar ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=300 ):
'''simple docstring'''
return F"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n "
def text_to_html_table ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = "<table border=\"1\" class=\"dataframe\">\n"
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_lowerCAmelCase : Optional[int] = F"{elt:.6f}" if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(_lowerCamelCase )
html_code += F" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class UpperCAmelCase_ :
lowerCamelCase__ = 5
lowerCamelCase__ = 0.2
def __init__( self, __a, __a = None, __a = True, __a = None, __a = 300, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = total
_lowerCAmelCase : Any = "" if prefix is None else prefix
_lowerCAmelCase : str = leave
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Optional[int] = width
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Tuple = None
def snake_case__ ( self, __a, __a = False, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[Any] = value
if comment is not None:
_lowerCAmelCase : Union[str, Any] = comment
if self.last_value is None:
_lowerCAmelCase : Optional[int] = time.time()
_lowerCAmelCase : Any = value
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Dict = self.warmup
_lowerCAmelCase : Union[str, Any] = 1
self.update_bar(__a)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
if self.first_calls > 0:
self.first_calls -= 1
_lowerCAmelCase : Dict = time.time()
_lowerCAmelCase : str = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_lowerCAmelCase : List[str] = self.elapsed_time / (value - self.start_value)
else:
_lowerCAmelCase : Optional[Any] = None
if value >= self.total:
_lowerCAmelCase : Optional[int] = self.total
_lowerCAmelCase : int = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_lowerCAmelCase : Tuple = self.average_time_per_item * (self.total - value)
self.update_bar(__a)
_lowerCAmelCase : str = value
_lowerCAmelCase : List[Any] = current_time
if self.average_time_per_item is None:
_lowerCAmelCase : List[str] = 1
else:
_lowerCAmelCase : Tuple = max(int(self.update_every / self.average_time_per_item), 1)
def snake_case__ ( self, __a, __a=None):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = " " * (len(str(self.total)) - len(str(__a))) + str(__a)
if self.elapsed_time is None:
_lowerCAmelCase : Optional[int] = f"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
_lowerCAmelCase : Optional[int] = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
else:
_lowerCAmelCase : List[str] = (
f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
f" {format_time(self.predicted_remaining)}"
)
self.label += f", {1/self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
self.display()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_lowerCAmelCase : List[str] = disp.display(disp.HTML(self.html_code), display_id=__a)
else:
self.output.update(disp.HTML(self.html_code))
def snake_case__ ( self):
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(""))
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a=None):
'''simple docstring'''
super().__init__(__a)
_lowerCAmelCase : str = None if column_names is None else [column_names]
_lowerCAmelCase : List[Any] = None
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_lowerCAmelCase : Union[str, Any] = disp.display(disp.HTML(self.html_code), display_id=__a)
else:
self.output.update(disp.HTML(self.html_code))
def snake_case__ ( self, __a):
'''simple docstring'''
if self.inner_table is None:
_lowerCAmelCase : str = [list(values.keys()), list(values.values())]
else:
_lowerCAmelCase : List[Any] = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__a)
_lowerCAmelCase : Optional[Any] = columns
self.inner_table.append([values[c] for c in columns])
def snake_case__ ( self, __a, __a=None, __a=300):
'''simple docstring'''
_lowerCAmelCase : Any = NotebookProgressBar(__a, prefix=__a, parent=self, width=__a)
return self.child_bar
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = None
self.display()
class UpperCAmelCase_ ( a):
def __init__( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : List[str] = False
def snake_case__ ( self, __a, __a, __a, **__a):
'''simple docstring'''
_lowerCAmelCase : Tuple = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Union[str, Any] = [self.first_column] + ["Training Loss"]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss")
_lowerCAmelCase : List[Any] = NotebookTrainingTracker(state.max_steps, __a)
def snake_case__ ( self, __a, __a, __a, **__a):
'''simple docstring'''
_lowerCAmelCase : str = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1, comment=f"Epoch {epoch}/{state.num_train_epochs}", force_update=self._force_next_update, )
_lowerCAmelCase : Tuple = False
def snake_case__ ( self, __a, __a, __a, __a=None, **__a):
'''simple docstring'''
if not has_length(__a):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_lowerCAmelCase : Any = self.training_tracker.add_child(len(__a))
else:
_lowerCAmelCase : Tuple = NotebookProgressBar(len(__a))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def snake_case__ ( self, __a, __a, __a, **__a):
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
_lowerCAmelCase : List[Any] = None
def snake_case__ ( self, __a, __a, __a, __a=None, **__a):
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_lowerCAmelCase : Any = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
_lowerCAmelCase : List[str] = state.global_step
self.training_tracker.write_line(__a)
def snake_case__ ( self, __a, __a, __a, __a=None, **__a):
'''simple docstring'''
if self.training_tracker is not None:
_lowerCAmelCase : Tuple = {"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history):
if "loss" in log:
_lowerCAmelCase : Tuple = log["loss"]
break
if self.first_column == "Epoch":
_lowerCAmelCase : int = int(state.epoch)
else:
_lowerCAmelCase : Optional[Any] = state.global_step
_lowerCAmelCase : Union[str, Any] = "eval"
for k in metrics:
if k.endswith("_loss"):
_lowerCAmelCase : Any = re.sub(R"\_loss$", "", __a)
_lowerCAmelCase : Optional[Any] = metrics.pop("total_flos", __a)
_lowerCAmelCase : str = metrics.pop("epoch", __a)
_lowerCAmelCase : Any = metrics.pop(f"{metric_key_prefix}_runtime", __a)
_lowerCAmelCase : Union[str, Any] = metrics.pop(f"{metric_key_prefix}_samples_per_second", __a)
_lowerCAmelCase : List[str] = metrics.pop(f"{metric_key_prefix}_steps_per_second", __a)
_lowerCAmelCase : List[str] = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", __a)
for k, v in metrics.items():
if k == f"{metric_key_prefix}_loss":
_lowerCAmelCase : List[Any] = v
else:
_lowerCAmelCase : str = k.split("_")
_lowerCAmelCase : int = " ".join([part.capitalize() for part in splits[1:]])
_lowerCAmelCase : List[str] = v
self.training_tracker.write_line(__a)
self.training_tracker.remove_child()
_lowerCAmelCase : List[Any] = None
# Evaluation takes a long time so we should force the next update.
_lowerCAmelCase : int = True
def snake_case__ ( self, __a, __a, __a, **__a):
'''simple docstring'''
self.training_tracker.update(
state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=__a)
_lowerCAmelCase : Dict = None
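# Self-contained check of the time formatting used by format_time above:
# seconds render as mm:ss, with the hour field only when non-zero.
def _format_time(t):
    t = int(t)
    h, m, s = t // 3_600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"

assert _format_time(75) == "01:15"
assert _format_time(3_675) == "1:01:15"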
| 717 |
import base64
def A ( _lowerCamelCase ):
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8" ) )
def A ( _lowerCamelCase ):
    '''simple docstring'''
    return base64.a85decode(_lowerCamelCase ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
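# Self-contained round-trip check of the Ascii85 helpers above: decoding an
# encoded payload recovers the original text.
import base64

assert base64.a85decode(base64.a85encode(b"hello")).decode("utf-8") == "hello"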
| 658 | 0 |