Dataset schema:

| column | type | values |
|---|---|---|
| code | string | lengths 81–54k |
| code_codestyle | int64 | 0–721 |
| style_context | string | lengths 91–41.9k |
| style_context_codestyle | int64 | 0–699 |
| label | int64 | 0–1 |
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| code_codestyle: 676 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Return the twin prime of `number` (i.e. number + 2) if both are prime, else -1.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    >>> twin_prime(5)
    7
    >>> twin_prime(17)
    19
    >>> twin_prime(0.1)
    Traceback (most recent call last):
        ...
    TypeError: Input value of [number=0.1] must be an integer
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
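`maths.prime_check.is_prime` is not included in this dump; below is a minimal trial-division stand-in with the same contract (a sketch so the snippet can be run standalone, not the library's actual implementation):

def is_prime_sketch(number: int) -> bool:
    # Trial division up to sqrt(number); adequate for the small inputs above.
    if number < 2:
        return False
    if number % 2 == 0:
        return number == 2
    divisor = 3
    while divisor * divisor <= number:
        if number % divisor == 0:
            return False
        divisor += 2
    return True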
| style_context_codestyle: 676 | label: 1 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
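The test pins down the exact URL format; a sketch of an equivalent implementation (named `hf_hub_url_sketch` to make clear it is not the `datasets` source) would be:

from typing import Optional


def hf_hub_url_sketch(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    # quote() percent-encodes blanks: "filename with blanks.csv" -> "filename%20with%20blanks.csv"
    return f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"


if __name__ == "__main__":
    assert hf_hub_url_sketch("org-name/dataset-name", "filename with blanks.csv") == (
        "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"
    )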
| code_codestyle: 676 | style_context: verbatim duplicate of the code above (omitted) | style_context_codestyle: 676 | label: 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
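A quick usage check for the class above (assumes network access to download the `distilbert-base-uncased` checkpoint listed in the tables):

if __name__ == "__main__":
    tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    enc = tok("Hello world!", "How are you?")
    print(tok.convert_ids_to_tokens(enc["input_ids"]))
    # token_type_ids: [CLS] + ids_0 + [SEP] are segment 0, ids_1 + [SEP] are segment 1
    print(tok.create_token_type_ids_from_sequences([7592], [2129]))  # [0, 0, 0, 1, 1]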
| code_codestyle: 676 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
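To make the PATTERNS substitution chain concrete, a small self-check; the TF key here is made up for illustration, not read from a real checkpoint:

def _check_rename_patterns() -> None:
    # "r.layer_" matches both "encoder.layer_" and "decoder.layer_" once "/" -> "."
    key = "decoder/layer_0/memory_attention/output_proj/kernel"  # illustrative key
    assert rename_state_dict_key(key) == "decoder.layers.0.encoder_attn.out_proj.weight"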
| style_context_codestyle: 676 | label: 1 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
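Outside the test harness, the scheduler would typically be swapped into a diffusers pipeline via `from_config`; a sketch under assumptions (the checkpoint id and prompt are not taken from this test file, and `torchsde` must be installed, per the decorator above):

if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")  # assumed checkpoint
    pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
    image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]
    image.save("sde_sample.png")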
| code_codestyle: 676 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
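A usage sketch for this pipeline; the Whisper and Stable Diffusion checkpoint ids and the `custom_pipeline` name are assumptions for illustration, not values taken from this file:

if __name__ == "__main__":
    from datasets import load_dataset
    from diffusers import DiffusionPipeline
    from transformers import WhisperForConditionalGeneration, WhisperProcessor

    device = "cuda" if torch.cuda.is_available() else "cpu"
    audio_sample = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[3]["audio"]

    speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
    speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")

    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline name
        speech_model=speech_model,
        speech_processor=speech_processor,
    ).to(device)

    image = pipe(audio_sample["array"], sampling_rate=audio_sample["sampling_rate"]).images[0]
    image.save("speech_to_image.png")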
| style_context_codestyle: 676 | label: 1 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
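A smoke test for the classes above using a tiny random config; every hyperparameter here is an arbitrary assumption, not a shipped checkpoint value:

if __name__ == "__main__":
    config = RobertaSeriesConfig(
        vocab_size=128,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=64,
        project_dim=16,
    )
    model = RobertaSeriesModelWithTransformation(config)
    out = model(input_ids=torch.tensor([[5, 6, 7]]), attention_mask=torch.tensor([[1, 1, 1]]))
    print(out.projection_state.shape)  # torch.Size([1, 3, 16])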
| code_codestyle: 676 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| style_context_codestyle: 676 | label: 1 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
_DESCRIPTION = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
_KWARGS_DESCRIPTION = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca",
            "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca",
            "iitp-mr", "iitp-pr", "actsa-sc", "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
| code_codestyle: 676 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """A*-style grid search returning the path from init to goal and the action map."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| style_context_codestyle: 676 | label: 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
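A quick sanity check of the environment-variable parsing above; even with both JSON blobs set, this prints False unless the `smdistributed` package is importable:

if __name__ == "__main__":
    os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
    os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
    print(is_sagemaker_model_parallel_available())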
| code_codestyle: 676 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, (list, tuple)):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_source_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| style_context_codestyle: 676 | label: 1 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Solves the multi-process interleaved print problem by locking on this file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
| code_codestyle: 676 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """
    Count ordered combinations of `array` items summing to `target` (naive recursion).

    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """
    Same count with top-down memoisation.

    >>> combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """
    Same count with a bottom-up table.

    >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    9
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
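    # All three implementations agree: for array=[1, 2, 5] and target=5 the nine
    # ordered combinations are 1+1+1+1+1, 1+1+1+2 (4 orders), 1+2+2 (3 orders) and 5.
    assert combination_sum_iv(n, array, target) == 9
    assert combination_sum_iv_dp_array(n, array, target) == 9
    assert combination_sum_iv_bottom_up(n, array, target) == 9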
| style_context_codestyle: 676 | label: 1 |
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """
    Determine whether the given string is a valid Sri Lankan mobile phone number.

    >>> is_sri_lankan_phone_number("+94773283048")
    True
    """
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
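    # Spot checks against the pattern above: valid numbers may start with 0, 94,
    # +94 or 0094 and must continue with a mobile prefix 70-72 or 74-78.
    for number, expected in [
        ("0094702343221", True),
        ("+94767283261", True),
        ("94721234567", True),
        ("0112345678", False),  # 011 landline prefix, not a mobile 07x number
    ]:
        assert is_sri_lankan_phone_number(number) is expected, number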
| code_codestyle: 676 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs ( mockfs : List[str] ) -> List[Any]:
    '''simple docstring'''
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = f"mock://{index_name}"
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class __lowercase ( lowerCamelCase__ ):
    def _a ( self) -> Optional[Any]:
        from elasticsearch import Elasticsearch
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(['foo', 'bar', 'foobar'])
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query)
            self.assertEqual(scores[0] , 1)
            self.assertEqual(indices[0] , 0)
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query , request_timeout=3_0)
            self.assertEqual(scores[0] , 1)
            self.assertEqual(indices[0] , 0)
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores) , 0)
            self.assertListEqual([1, 1, 1] , best_indices)
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=3_0)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores) , 0)
            self.assertListEqual([1, 1, 1] , best_indices)
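# Illustrative usage sketch (not part of the test module above; requires faiss):
# the smallest FaissIndex round trip the tests exercise -- build an inner-product
# index, query it, and check that the matching row comes back first.
def _faiss_usage_sketch():
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5 , dtype=np.float32))  # five one-hot vectors
    scores , indices = index.search(np.eye(5 , dtype=np.float32)[0])
    assert indices[0] == 0  # the identical vector is its own nearest neighbour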
| 676 | 1 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowercase :
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Optional[int]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def _a ( self) -> Union[str, Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = OpenLlamaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
def _a ( self) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> Tuple:
__snake_case = OpenLlamaModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self) -> int:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'single_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'multi_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _a ( self) -> List[Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def _a ( self , lowercase_) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = ids_tensor([1, 1_0] , config.vocab_size)
__snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = OpenLlamaModel(lowercase_)
original_model.to(lowercase_)
original_model.eval()
__snake_case = original_model(lowercase_).last_hidden_state
__snake_case = original_model(lowercase_).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = {'type': scaling_type, 'factor': 10.0}
__snake_case = OpenLlamaModel(lowercase_)
scaled_model.to(lowercase_)
scaled_model.eval()
__snake_case = scaled_model(lowercase_).last_hidden_state
__snake_case = scaled_model(lowercase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
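# Illustrative sketch (not part of the original tests): the scaling dict that the
# parameterized test above exercises is attached to the config as `rope_scaling`
# before a model is built; 'linear' and 'dynamic' are the two supported types.
# config = OpenLlamaConfig()
# config.rope_scaling = {'type': 'linear', 'factor': 10.0}
# model = OpenLlamaModel(config)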
| 676 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results( result : Dataset , args : Dict[str, str] ) -> Optional[Any]:
    '''simple docstring'''
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('wer' )
    cer = load_metric('cer' )
    # compute metrics
    wer_result = wer.compute(references=result['target'] , predictions=result['prediction'] )
    cer_result = cer.compute(references=result['target'] , predictions=result['prediction'] )
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str )
    with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file , 'w' ) as p, open(target_file , 'w' ) as t:
            # mapping function to write output
            def write_to_file(batch : Union[str, Any] , i : Tuple ):
                p.write(f"{i}" + '\n' )
                p.write(batch['prediction'] + '\n' )
                t.write(f"{i}" + '\n' )
                t.write(batch['target'] + '\n' )
            result.map(write_to_file , with_indices=True )
def normalize_text( text : str ) -> str:
    '''simple docstring'''
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '  ', ' ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t ) )
    return text
def main( args : int ) -> Optional[int]:
    '''simple docstring'''
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch : Optional[Any] ):
        prediction = asr(
            batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
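# Example invocation (illustrative; the model and dataset identifiers are
# placeholders, not values from the original file):
# python eval.py --model_id <hub-model-id> --dataset mozilla-foundation/common_voice_8_0 \
#     --config en --split test --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs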
| 676 | 1 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class __lowercase ( datasets.BuilderConfig ):
__UpperCAmelCase = None
class __lowercase ( datasets.ArrowBasedBuilder ):
__UpperCAmelCase = PandasConfig
def _a ( self) -> Optional[int]:
return datasets.DatasetInfo(features=self.config.features)
def _a ( self , lowercase_) -> int:
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}")
__snake_case = dl_manager.download_and_extract(self.config.data_files)
if isinstance(lowercase_ , (str, list, tuple)):
__snake_case = data_files
if isinstance(lowercase_ , lowercase_):
__snake_case = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case = [dl_manager.iter_files(lowercase_) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files})]
__snake_case = []
for split_name, files in data_files.items():
if isinstance(lowercase_ , lowercase_):
__snake_case = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case = [dl_manager.iter_files(lowercase_) for file in files]
splits.append(datasets.SplitGenerator(name=lowercase_ , gen_kwargs={'files': files}))
return splits
def _a ( self , lowercase_) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__snake_case = table_cast(lowercase_ , self.config.features.arrow_schema)
return pa_table
def _a ( self , lowercase_) -> Optional[Any]:
for i, file in enumerate(itertools.chain.from_iterable(lowercase_)):
with open(lowercase_ , 'rb') as f:
__snake_case = pa.Table.from_pandas(pd.read_pickle(lowercase_))
yield i, self._cast_table(lowercase_)
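# Illustrative usage (not part of the builder above): this packaged "pandas"
# builder is normally reached through `load_dataset` with pickled DataFrames;
# the file path below is a placeholder.
# from datasets import load_dataset
# dset = load_dataset('pandas', data_files={'train': 'my_frame.pkl'})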
| 676 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock( *msgs : Optional[Any] ) -> Optional[int]:
    '''simple docstring'''
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
UpperCAmelCase__ : Any = torch.device("cuda", local_rank)
UpperCAmelCase__ : Union[str, Any] = socket.gethostname()
UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
UpperCAmelCase__ : Optional[int] = dist.get_rank()
UpperCAmelCase__ : List[str] = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 676 | 1 |
from statistics import mean
import numpy as np
def calculate_turn_around_time( process_name : list , arrival_time : list , burst_time : list , no_of_process : int ) -> list:
    '''simple docstring'''
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time )]
    process_name = [process_name[i] for i in np.argsort(arrival_time )]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0 , no_of_process ):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time( process_name : list , turn_around_time : list , burst_time : list , no_of_process : int ) -> list:
    '''simple docstring'''
    waiting_time = [0] * no_of_process
    for i in range(0 , no_of_process ):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
UpperCAmelCase__ : str = 5
UpperCAmelCase__ : Union[str, Any] = ["A", "B", "C", "D", "E"]
UpperCAmelCase__ : Optional[Any] = [1, 2, 3, 4, 5]
UpperCAmelCase__ : Any = [1, 2, 3, 4, 5]
UpperCAmelCase__ : Optional[int] = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
UpperCAmelCase__ : Optional[int] = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 676 |
from datetime import datetime
import requests
def download_video( url : str ) -> bytes:
    '''simple docstring'''
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
UpperCAmelCase__ : Dict = input("Enter Video/IGTV url: ").strip()
UpperCAmelCase__ : Optional[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 1 |
def alternative_string_arrange( first_str : str , second_str : str ) -> str:
    '''simple docstring'''
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
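# Expected behaviour (illustrative): characters are interleaved until the shorter
# string runs out, so the demo above prints "AXBYZ".
assert alternative_string_arrange('AB' , 'XYZ') == 'AXBYZ'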
| 676 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[Any] = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''mvp'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=5_0_2_6_7 , max_position_embeddings=1_0_2_4 , encoder_layers=1_2 , encoder_ffn_dim=4_0_9_6 , encoder_attention_heads=1_6 , decoder_layers=1_2 , decoder_ffn_dim=4_0_9_6 , decoder_attention_heads=1_6 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1_0_2_4 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=1_0_0 , prompt_mid_dim=8_0_0 , **kwargs , ) -> Optional[Any]:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , False):
            self.forced_bos_token_id = self.bos_token_id
warnings.warn(
F"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
'The config can simply be saved and uploaded again to be fixed.')
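# Illustrative sanity check (not part of the original file; upstream this class is
# published as `MvpConfig`): the defaults above describe the base MVP shape,
# i.e. 12 encoder and 12 decoder layers around d_model=1024.
# config = MvpConfig()
# assert config.d_model == 1_0_2_4 and config.encoder_layers == 1_2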
| 676 |
def is_automorphic_number( number : int ) -> bool:
    '''simple docstring'''
    if not isinstance(number , int ):
        error_msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(error_msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
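# Worked examples (illustrative): 5 * 5 = 25 ends in 5 and 76 * 76 = 5776 ends in
# 76, so both are automorphic; 7 * 7 = 49 does not end in 7.
assert is_automorphic_number(5) and is_automorphic_number(76)
assert not is_automorphic_number(7)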
| 676 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__( self , audio , sampling_rate=1_6_0_0_0 , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 5_0 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> List[str]:
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors='pt' , sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs , max_length=4_8_0_0_0_0)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True)[
            0
        ]
        if isinstance(prompt , str):
            batch_size = 1
        elif isinstance(prompt , list):
            batch_size = len(prompt)
        else:
            raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
| 676 |
import numpy as np
def sigmoid( vector : np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))
def swish( vector : np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
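# Worked values (illustrative): sigmoid(0) = 1 / (1 + e^0) = 0.5, so the
# SiLU/"swish" activation gives swish(0) = 0 * 0.5 = 0; for large positive x,
# swish(x) approaches x because sigmoid(x) approaches 1.
assert float(sigmoid(np.array(0.0))) == 0.5
assert float(swish(np.array(0.0))) == 0.0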
| 676 | 1 |
from __future__ import annotations
class __lowercase :
def __init__( self , lowercase_) -> None:
__snake_case = data
__snake_case = None
__snake_case = None
def display( tree : Node | None ) -> None:  # In Order traversal of the tree
    '''simple docstring'''
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree( tree : Node | None ) -> int:
    '''simple docstring'''
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree : Node ) -> bool:
    '''simple docstring'''
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    '''simple docstring'''
    # The attachment points below are a representative reconstruction; the
    # original targets of these Node assignments were lost in this dump.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('Tree is: ' )
    display(tree )
if __name__ == "__main__":
main()
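# Property check (illustrative): "full" means every node has zero or two children,
# so a lone root qualifies while a root with a single child does not.
_root = Node(0)
assert is_full_binary_tree(_root)
_root.left = Node(1)
assert not is_full_binary_tree(_root)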
| 676 |
def lucas_lehmer_test( p : int ) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError('p should not be less than 2!' )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
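# Worked trace (illustrative) for p = 3: m = 2**3 - 1 = 7 and s starts at 4; the
# single iteration gives s = (4 * 4 - 2) % 7 = 0, so M_3 = 7 is reported prime.
assert lucas_lehmer_test(3)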
| 676 | 1 |
from ..utils import DummyObject, requires_backends
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''speech''']
def __init__( self , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(self , ['speech'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''speech''']
def __init__( self , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(self , ['speech'])
| 676 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : Optional[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 1 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file: Mock , sock: Mock) -> None:
    """Exercise send_file end to end against mocked socket and file objects."""
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f )
    # ===== invoke =====
    send_file(filename='mytext.txt' , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 676 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 1 |
from datetime import datetime
import requests
def download_video(url: str ) -> bytes:
    """Fetch the raw MP4 bytes for an Instagram/IGTV post via downloadgram.net."""
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
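# Note: this relies on the third-party downloadgram.net endpoint; the response
# shape ([0]['urls'][0]['src']) is an observed format and may change at any time.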
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url: str ) -> Swin2SRConfig:
    """Derive a Swin2SRConfig from the checkpoint URL."""
    config = Swin2SRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''
    return config
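# Note: checkpoint URLs that match none of the branches in `get_config` above
# (e.g. Swin2SR_ClassicalSR_X2_64) fall through and keep the default
# Swin2SRConfig values.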
def rename_key(name: str , config: Swin2SRConfig ) -> str:
    """Map one original checkpoint key to the Hugging Face naming scheme."""
if "patch_embed.proj" in name and "layers" not in name:
__snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__snake_case = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__snake_case = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "conv_first" in name:
__snake_case = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__snake_case = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__snake_case = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__snake_case = name.replace('upsample.2' , 'upsample.convolution_1' )
__snake_case = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__snake_case = 'swin2sr.' + name
return name
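# Illustrative trace of `rename_key` (assuming the default config): the original
# checkpoint key "layers.0.residual_group.blocks.1.attn.proj.weight" becomes
# "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight".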
def convert_state_dict(orig_state_dict: dict , config: Swin2SRConfig ) -> dict:
    """Rename every key (splitting fused qkv tensors) to the Hugging Face scheme."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val
    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url: str , pytorch_dump_folder_path: str , push_to_hub: bool ) -> None:
    """Convert and verify a Swin2SR checkpoint, optionally saving/pushing it."""
    config = get_config(checkpoint_url )
    model = Swin2SRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 )
print('Looks ok!' )
    url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving image processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}" )
        processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 676 | 1 |
import sys
from collections import defaultdict
class __lowercase :
    def __init__( self) -> None:
        self.node_position = []
    def get_position( self , vertex) -> int:
        return self.node_position[vertex]
    def set_position( self , vertex , pos) -> None:
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions) -> None:
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start]))
                self.set_position(positions[start] , temp)
                self.top_to_bottom(heap , smallest_child , size , positions)
    def bottom_to_top( self , val , index , heap , position) -> None:
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0)
    def heapify( self , heap , positions) -> None:
        start = len(heap) // 2 - 1
        for i in range(start , -1 , -1):
            self.top_to_bottom(heap , i , len(heap) , positions)
    def delete_minimum( self , heap , positions) -> int:
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap) , positions)
        return temp
def prisms_algorithm(adjacency_list ) -> list:
    """Prim's algorithm: return the edge list of a minimum spanning tree."""
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
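# Illustrative example (not part of the original script): for the triangle graph
#   {0: [[1, 1], [2, 4]], 1: [[0, 1], [2, 2]], 2: [[0, 4], [1, 2]]}
# prisms_algorithm returns the MST edges [(0, 1), (1, 2)].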
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 676 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : str = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''openai-gpt'''
__UpperCAmelCase = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=4_0_4_7_8 , n_positions=5_1_2 , n_embd=7_6_8 , n_layer=1_2 , n_head=1_2 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 676 |
from __future__ import annotations
class __lowercase :
    def __init__( self , data) -> None:
        self.data = data
        self.left = None
        self.right = None
def display(tree: Node | None ) -> None:  # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree(tree: Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
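# For instance, depth_of_tree(None) == 0 and a single-node tree has depth 1.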
def is_full_binary_tree(tree: Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
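# For instance, a root with exactly one child is not a full binary tree:
#   root = Node(1); root.left = Node(2)  ->  is_full_binary_tree(root) is False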
def main() -> None:  # Main function for testing.
    """Build a small test tree and run the checks above on it."""
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.right.left = Node(6 )
    tree.right.right = Node(7 )
    tree.left.left.left = Node(8 )
    tree.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('Tree is: ' )
    display(tree )
if __name__ == "__main__":
main()
| 676 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ : Union[str, Any] = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class __lowercase ( PretrainedConfig ):
    model_type = '''table-transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=1_0_0 , encoder_layers=6 , encoder_ffn_dim=2_0_4_8 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) -> None:
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config , dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs)
@property
    def num_attention_heads( self) -> int:
return self.encoder_attention_heads
@property
    def hidden_size( self) -> int:
return self.d_model
class __lowercase ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
    def atol_for_validation( self) -> float:
return 1e-5
@property
    def default_onnx_opset( self) -> int:
return 1_2
| 676 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowercase ( unittest.TestCase ):
    def setUp( self) -> None:
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])
@require_multi_gpu
    def test_multi_gpu( self) -> None:
        print(F"Found {torch.cuda.device_count()} devices.")
        cmd = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd , env=os.environ.copy())
@require_multi_gpu
    def test_multi_gpu_ops( self) -> None:
        print(F"Found {torch.cuda.device_count()} devices.")
        cmd = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(F"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd , env=os.environ.copy())
@require_multi_gpu
    def test_pad_across_processes( self) -> None:
        cmd = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd , env=os.environ.copy())
@require_multi_gpu
    def test_distributed_data_generation( self) -> None:
        print(F"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1'):
            execute_subprocess_async(cmd , env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 676 |
from maths.prime_check import is_prime
def twin_prime(number: int ) -> int:
    """
    Return number + 2 if number and number + 2 form a twin-prime pair, else -1.
    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
class __lowercase ( VideoMAEImageProcessor ):
    def __init__( self , *args , **kwargs) -> None:
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs)
| 676 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id , path , revision ) -> None:
    """hf_hub_url should build the expected dataset resolve URL."""
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}"
| 676 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowercase ( PipelineTool ):
    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation
    inputs = ['''image''', '''text''']
    outputs = ['''image''']
    def __init__( self , *args , **kwargs) -> None:
        requires_backends(self , ['vision'])
        super().__init__(*args , **kwargs)
    def encode( self , image , label) -> Any:
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='pt')
    def forward( self , inputs) -> Any:
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits
    def decode( self , outputs) -> Any:
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 2_5_5).astype(np.uint8))
| 676 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str ) -> str:
    """Apply every (pegasus_name, hf_name) replacement in PATTERNS, in order."""
for pegasus_name, hf_name in PATTERNS:
__snake_case = k.replace(snake_case__ , snake_case__ )
return k
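# Illustrative trace: the TF key "decoder/ffn/dense/kernel" becomes
# "decoder.fc1.weight" after the replacements above are applied in order.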
def convert_pegasus(tf_weights: dict , cfg_updates: dict ) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load the converted TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing , extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
__snake_case = tf.train.list_variables(snake_case__ )
__snake_case = {}
__snake_case = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
__snake_case = any(pat in name for pat in ignore_name )
if skip_key:
continue
__snake_case = tf.train.load_variable(snake_case__ , snake_case__ )
__snake_case = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str , save_dir: str ) -> None:
    """Convert a TF Pegasus checkpoint to a saved PyTorch model and tokenizer."""
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd , Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 676 | 1 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}  # letter -> index
dict2 = dict(enumerate(ascii_uppercase))  # index -> letter
def generate_key(message: str , key: str ) -> str:
    """Stretch the key so that it has the same length as the message."""
    x = len(message )
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key ) == len(message ):
            break
key += key[i]
i += 1
return key
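# For example, generate_key("THE GERMAN ATTACK", "SECRET") returns
# "SECRETSECRETSECRE" (the key repeated up to the message length).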
def cipher_text(message: str , key_new: str ) -> str:
    """Encrypt `message` with the stretched key, preserving spaces."""
    cipher_text = ''
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
return cipher_text
def original_text(cipher_text: str , key_new: str ) -> str:
    """Decrypt `cipher_text` back to the original message."""
    or_txt = ''
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
return or_txt
def main() -> None:
    """Demonstrate an encryption/decryption round-trip."""
    message = 'THE GERMAN ATTACK'
    key = 'SECRET'
    key_new = generate_key(message , key )
    s = cipher_text(message , key_new )
    print(f"Encrypted Text = {s}" )
    print(f"Original Text = {original_text(s , key_new )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 676 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( DiffusionPipeline ):
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ) -> None:
        super().__init__()
        if safety_checker is None:
            logger.warning(
                F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
                ' results in services or applications open to the public. Both the diffusers team and Hugging Face'
                ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
                ' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
                ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
    def __call__( self , audio , sampling_rate=1_6_0_0_0 , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 5_0 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> Any:
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors='pt' , sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs , max_length=4_8_0_0_0_0)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True)[
            0
        ]
        if isinstance(prompt , str):
            batch_size = 1
        elif isinstance(prompt , list):
            batch_size = len(prompt)
        else:
            raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int) or callback_steps <= 0)
        ):
            raise ValueError(
                F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                F" {type(callback_steps)}.")
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                F" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    F"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    F" {type(prompt)}.")
            elif isinstance(negative_prompt , str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    F"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    ' the batch size of `prompt`.')
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape , generator=generator , device='cpu' , dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents)
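        # scale the latents back by the VAE scaling factor (1 / 0.18215) before decoding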
        latents = 1 / 0.1_8215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0 , 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None)
| 676 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class __lowercase ( TestCase ):
    def _create_dummy_dataset( self) -> Dataset:
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(3_0).tolist()]})
        return dset
    def test_add_faiss_index( self) -> None:
        import faiss
        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i: {"vecs": i * np.ones(5 , dtype=np.float32)} , with_indices=True , keep_in_memory=True)
        dset = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32))
        self.assertEqual(examples['filename'][0] , 'my_name-train_29')
        dset.drop_index('vecs')
    def test_add_faiss_index_from_external_arrays( self) -> None:
        import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32))
        self.assertEqual(examples['filename'][0] , 'my_name-train_29')
    def test_serialization( self) -> None:
        import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index('vecs' , tmp_file.name)
            dset.load_faiss_index('vecs2' , tmp_file.name)
        os.unlink(tmp_file.name)
        scores , examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32))
        self.assertEqual(examples['filename'][0] , 'my_name-train_29')
    def test_drop_index( self) -> None:
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
        dset.drop_index('vecs')
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32)))
    def test_add_elasticsearch_index( self) -> None:
        from elasticsearch import Elasticsearch
        dset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 3_0)
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client)
            scores , examples = dset.get_nearest_examples('filename' , 'my_name-train_29')
            self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( TestCase ):
    def test_flat_ip( self) -> None:
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal , 5)
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal , 1_0)
        # single query
        query = np.zeros(5 , dtype=np.float32)
        query[1] = 1
        scores , indices = index.search(query)
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1))
        self.assertGreater(scores[0] , 0)
        self.assertEqual(indices[0] , 1)
        # batched queries
        queries = np.eye(5 , dtype=np.float32)[::-1]
        total_scores , total_indices = index.search_batch(queries)
        self.assertRaises(ValueError , index.search_batch , queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores) , 0)
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices)
    def test_factory( self) -> None:
        import faiss
        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5 , dtype=np.float32))
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5 , dtype=np.float32))
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
    def test_custom( self) -> None:
        import faiss
        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5 , dtype=np.float32))
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
    def test_serialization( self) -> None:
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5 , dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5 , dtype=np.float32)
        query[1] = 1
        scores , indices = index.search(query)
        self.assertGreater(scores[0] , 0)
        self.assertEqual(indices[0] , 1)
@require_faiss
def test_serialization_fs(mockfs ) -> None:
    """FaissIndex round-trips through an fsspec filesystem (pytest `mockfs` fixture)."""
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = f"mock://{index_name}"
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowercase ( TestCase ):
    def test_elasticsearch( self) -> None:
        from elasticsearch import Elasticsearch
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query)
            self.assertEqual(scores[0] , 1)
            self.assertEqual(indices[0] , 0)
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query , request_timeout=3_0)
            self.assertEqual(scores[0] , 1)
            self.assertEqual(indices[0] , 0)
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores) , 0)
            self.assertListEqual([1, 1, 1] , best_indices)
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=3_0)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores) , 0)
            self.assertListEqual([1, 1, 1] , best_indices)
| 676 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( Seq2SeqTrainer ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs) -> None:
        super().__init__(*args , **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset = None , eval_examples=None , ignore_keys = None , metric_key_prefix = "eval" , **gen_kwargs , ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs['max_length'] = (
            gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
        )
        gen_kwargs['num_beams'] = (
            gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(F"{metric_key_prefix}_"):
                    metrics[F"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics)
        return metrics
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]:
__snake_case = gen_kwargs.copy()
__snake_case = self.get_test_dataloader(lowercase_)
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict')
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
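# A minimal usage sketch (illustrative only; the class name below is an
# assumption, since the subclass was renamed during preprocessing). It is
# constructed like a stock Seq2SeqTrainer plus the two extra hooks wired in
# __init__, and generation kwargs flow through evaluate()/predict():
#
#   trainer = ThisSeqaSeqTrainer(
#       model=model, args=training_args,
#       train_dataset=train_dataset, eval_dataset=eval_dataset,
#       eval_examples=eval_examples,              # extra hook
#       post_process_function=post_processing_fn, # extra hook
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=64, num_beams=4)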
| 676 | 1 |
from __future__ import annotations
UpperCAmelCase__ : Optional[int] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class __lowercase :
def __init__( self , lowercase_ , lowercase_) -> None:
__snake_case = graph
# mapping node to its parent in resulting breadth first tree
__snake_case = {}
__snake_case = source_vertex
def _a ( self) -> None:
__snake_case = {self.source_vertex}
__snake_case = None
__snake_case = [self.source_vertex] # first in first out queue
while queue:
__snake_case = queue.pop(0)
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowercase_)
__snake_case = vertex
queue.append(lowercase_)
def _a ( self , lowercase_) -> str:
if target_vertex == self.source_vertex:
return self.source_vertex
__snake_case = self.parent.get(lowercase_)
if target_vertex_parent is None:
__snake_case = (
F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(lowercase_)
return self.shortest_path(lowercase_) + F"->{target_vertex}"
if __name__ == "__main__":
UpperCAmelCase__ : Dict = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 676 |
from __future__ import annotations
UpperCAmelCase__ : Dict = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def A ( snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
__snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the reference grid
__snake_case = 1
__snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the action grid
__snake_case = init[0]
__snake_case = init[1]
__snake_case = 0
__snake_case = g + heuristic[x][y] # estimated total cost: cost so far plus heuristic to the goal
__snake_case = [[f, g, x, y]]
__snake_case = False # flag that is set when search is complete
__snake_case = False # flag set if we can't find expand
while not found and not resign:
if len(snake_case__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case = cell.pop()
__snake_case = next_cell[2]
__snake_case = next_cell[3]
__snake_case = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case = True
else:
for i in range(len(snake_case__ ) ): # to try out different valid actions
__snake_case = x + DIRECTIONS[i][0]
__snake_case = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case = g + cost
__snake_case = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case = 1
__snake_case = i
__snake_case = []
__snake_case = goal[0]
__snake_case = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case = x - DIRECTIONS[action[x][y]][0]
__snake_case = y - DIRECTIONS[action[x][y]][1]
__snake_case = xa
__snake_case = ya
invpath.append([x, y] )
__snake_case = []
for i in range(len(snake_case__ ) ):
path.append(invpath[len(snake_case__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCAmelCase__ : str = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCAmelCase__ : int = [0, 0]
# all coordinates are given in format [y,x]
UpperCAmelCase__ : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCAmelCase__ : Optional[Any] = 1
# the cost map which pushes the path closer to the goal
UpperCAmelCase__ : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCAmelCase__ : Tuple = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCAmelCase__ : Optional[int] = 99
UpperCAmelCase__ , UpperCAmelCase__ : str = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 676 | 1 |
from math import pow, sqrt
def A ( *snake_case__ : float ) -> bool:
'''simple docstring'''
__snake_case = len(snake_case__ ) > 0 and all(value > 0.0 for value in values )
return result
def A ( snake_case__ : float , snake_case__ : float ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(snake_case__ , snake_case__ )
else ValueError('Input Error: Molar mass values must be greater than 0.' )
)
def A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(snake_case__ , snake_case__ , snake_case__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(snake_case__ , snake_case__ , snake_case__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(snake_case__ , snake_case__ , snake_case__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def A ( snake_case__ : float , snake_case__ : float , snake_case__ : float ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(snake_case__ , snake_case__ , snake_case__ )
else ValueError(
'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
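# A self-contained worked example of Graham's law (illustrative only; it does
# not call the helpers above, whose names were all collapsed to `A` during
# renaming). The law states rate_1 / rate_2 = sqrt(M_2 / M_1); the molar
# masses below are assumed textbook values.
from math import sqrt

molar_mass_hydrogen = 2.016 # g/mol
molar_mass_oxygen = 31.9988 # g/mol
print(round(sqrt(molar_mass_oxygen / molar_mass_hydrogen) , 6)) # ~3.984: hydrogen effuses about 4x faster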
| 676 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
| 676 | 1 |
def A ( snake_case__ : int ) -> bool:
'''simple docstring'''
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
__snake_case = 4
__snake_case = (1 << p) - 1
for _ in range(p - 2 ):
__snake_case = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
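# Worked trace (illustrative, not part of the original file): for p = 5 the
# Mersenne number is m = 2**5 - 1 = 31, and the sequence s_0 = 4,
# s_{i+1} = (s_i**2 - 2) % m runs 4 -> 14 -> 8 -> 0 over p - 2 = 3 steps,
# so 31 is prime. A standalone re-check of that arithmetic:
p = 5
m = (1 << p) - 1
s = 4
for _ in range(p - 2):
    s = ((s * s) - 2) % m
print(s == 0) # True -> 2**5 - 1 = 31 is a Mersenne prime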
| 676 |
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
def count_of_possible_combinations(snake_case__ : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case__ )
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
snake_case__ : int , snake_case__ : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
__snake_case = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case__ )
for item in array )
__snake_case = answer
return answer
__snake_case = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ )
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
__snake_case = [0] * (target + 1)
__snake_case = 1
for i in range(1 , target + 1 ):
for j in range(snake_case__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : Optional[int] = 5
UpperCAmelCase__ : Tuple = [1, 2, 5]
print(combination_sum_iv(n, array, target))
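# Illustrative cross-check (not in the original file): with array = [1, 2, 5]
# and target = 5 there are 9 ordered combinations: 1+1+1+1+1; one 2 and three
# 1s in 4 orders; two 2s and one 1 in 3 orders; and 5 itself. A standalone
# bottom-up recount matching the DP above:
arr, tgt = [1, 2, 5], 5
dp = [1] + [0] * tgt
for i in range(1, tgt + 1):
    dp[i] = sum(dp[i - x] for x in arr if i - x >= 0)
print(dp[tgt]) # 9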
| 676 | 1 |
def A ( snake_case__ : int ) -> int:
'''simple docstring'''
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def A ( snake_case__ : int ) -> bool:
'''simple docstring'''
__snake_case = 0
__snake_case = number
while duplicate > 0:
__snake_case , __snake_case = divmod(snake_case__ , 10 )
fact_sum += factorial(snake_case__ )
return fact_sum == number
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
UpperCAmelCase__ : str = int(input("Enter number: ").strip())
print(
F"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
)
| 676 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> List[str]:
__snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase_) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value = [(True, None)] * 3_0 # assign the stubbed bulk result (was mistakenly called instead of assigned)
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def A ( snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__snake_case = 'index.faiss'
__snake_case = f"mock://{index_name}"
index.save(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=lowercase_)
mocked_bulk.return_value = [(True, None)] * 3 # assign the stubbed bulk result (was mistakenly called instead of assigned)
index.add_documents(['foo', 'bar', 'foobar'])
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
| 676 | 1 |
from typing import List
import numpy as np
def A ( snake_case__ : dict ) -> int:
'''simple docstring'''
__snake_case = {key: len(snake_case__ ) for key, value in gen_kwargs.items() if isinstance(snake_case__ , snake_case__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'Sharding is ambiguous for this dataset: '
+ 'we found several lists of data sources with different lengths, and we don\'t know which list to parallelize over:\n'
+ '\n'.join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items() )
+ '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
+ 'and use tuples otherwise. In the end there should be only one list, or several lists with the same length.'
) )
__snake_case = max(lists_lengths.values() , default=0 )
return max(1 , snake_case__ )
def A ( snake_case__ : int , snake_case__ : int ) -> List[range]:
'''simple docstring'''
__snake_case = []
for group_idx in range(snake_case__ ):
__snake_case = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__snake_case = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__snake_case = range(snake_case__ , start + num_shards_to_add )
shards_indices_per_group.append(snake_case__ )
return shards_indices_per_group
def A ( snake_case__ : dict , snake_case__ : int ) -> List[dict]:
'''simple docstring'''
__snake_case = _number_of_shards_in_gen_kwargs(snake_case__ )
if num_shards == 1:
return [dict(snake_case__ )]
else:
__snake_case = _distribute_shards(num_shards=snake_case__ , max_num_jobs=snake_case__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(snake_case__ , snake_case__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(snake_case__ ) )
]
def A ( snake_case__ : List[dict] ) -> dict:
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , snake_case__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def A ( snake_case__ : np.random.Generator , snake_case__ : dict ) -> dict:
'''simple docstring'''
__snake_case = {len(snake_case__ ) for value in gen_kwargs.values() if isinstance(snake_case__ , snake_case__ )}
__snake_case = {}
for size in list_sizes:
__snake_case = list(range(snake_case__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__snake_case = dict(snake_case__ )
for key, value in shuffled_kwargs.items():
if isinstance(snake_case__ , snake_case__ ):
__snake_case = [value[i] for i in indices_per_size[len(snake_case__ )]]
return shuffled_kwargs
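# A standalone sketch of the shard-distribution rule implemented above
# (illustrative; the helpers in this file lost their parameter bindings during
# renaming): num_shards shards are split across up to max_num_jobs groups as
# evenly as possible, with the first num_shards % max_num_jobs groups taking
# one extra shard each.
def distribute(num_shards: int, max_num_jobs: int) -> list:
    groups = []
    start = 0
    for group_idx in range(max_num_jobs):
        size = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if size == 0:
            break
        groups.append(range(start, start + size))
        start += size
    return groups

print(distribute(10, 3)) # [range(0, 4), range(4, 7), range(7, 10)]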
| 676 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = args.log_outputs
__snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
__snake_case = load_metric('wer' )
__snake_case = load_metric('cer' )
# compute metrics
__snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] )
__snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
__snake_case = f"WER: {wer_result}\nCER: {cer_result}"
print(snake_case__ )
with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
f.write(snake_case__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__snake_case = f"log_{dataset_id}_predictions.txt"
__snake_case = f"log_{dataset_id}_targets.txt"
with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t:
# mapping function to write output
def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
p.write(f"{i}" + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f"{i}" + '\n' )
t.write(batch['target'] + '\n' )
result.map(snake_case__ , with_indices=snake_case__ )
def A ( snake_case__ : str ) -> str:
'''simple docstring'''
__snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__snake_case = re.sub(snake_case__ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
__snake_case = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
__snake_case = ' '.join(text.split(snake_case__ ) )
return text
def A ( snake_case__ : int ) -> Optional[int]:
'''simple docstring'''
# load dataset
__snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
__snake_case = AutoFeatureExtractor.from_pretrained(args.model_id )
__snake_case = feature_extractor.sampling_rate
# resample audio
__snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) )
# load eval pipeline
if args.device is None:
__snake_case = 0 if torch.cuda.is_available() else -1
__snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case__ : Optional[Any] ):
__snake_case = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__snake_case = prediction['text']
__snake_case = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
__snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case__ , snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
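# Example invocation (a sketch; the script file name and all argument values
# are placeholders, not taken from the original file):
#
#   python eval.py --model_id <hub-model-id> \
#       --dataset <dataset-id> --config <config> --split test \
#       --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs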
| 676 | 1 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Any = "Hello world! cécé herlolip"
def A ( snake_case__ : str , snake_case__ : str , snake_case__ : bool ) -> Optional[Any]:
'''simple docstring'''
__snake_case = FairseqRobertaModel.from_pretrained(snake_case__ )
roberta.eval() # disable dropout
__snake_case = roberta.model.encoder.sentence_encoder
__snake_case = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
__snake_case = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , snake_case__ )
__snake_case = XLMRobertaXLForSequenceClassification(snake_case__ ) if classification_head else XLMRobertaXLForMaskedLM(snake_case__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__snake_case = roberta_sent_encoder.embed_tokens.weight
__snake_case = roberta_sent_encoder.embed_positions.weight
__snake_case = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__snake_case = roberta_sent_encoder.layer_norm.weight
__snake_case = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__snake_case = model.roberta.encoder.layer[i]
__snake_case = roberta_sent_encoder.layers[i]
__snake_case = layer.attention
__snake_case = roberta_layer.self_attn_layer_norm.weight
__snake_case = roberta_layer.self_attn_layer_norm.bias
# self attention
__snake_case = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__snake_case = roberta_layer.self_attn.q_proj.weight
__snake_case = roberta_layer.self_attn.q_proj.bias
__snake_case = roberta_layer.self_attn.k_proj.weight
__snake_case = roberta_layer.self_attn.k_proj.bias
__snake_case = roberta_layer.self_attn.v_proj.weight
__snake_case = roberta_layer.self_attn.v_proj.bias
# self-attention output
__snake_case = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__snake_case = roberta_layer.self_attn.out_proj.weight
__snake_case = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__snake_case = roberta_layer.final_layer_norm.weight
__snake_case = roberta_layer.final_layer_norm.bias
# intermediate
__snake_case = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__snake_case = roberta_layer.fca.weight
__snake_case = roberta_layer.fca.bias
# output
__snake_case = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__snake_case = roberta_layer.fca.weight
__snake_case = roberta_layer.fca.bias
# end of layer
if classification_head:
__snake_case = roberta.model.classification_heads['mnli'].dense.weight
__snake_case = roberta.model.classification_heads['mnli'].dense.bias
__snake_case = roberta.model.classification_heads['mnli'].out_proj.weight
__snake_case = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__snake_case = roberta.model.encoder.lm_head.dense.weight
__snake_case = roberta.model.encoder.lm_head.dense.bias
__snake_case = roberta.model.encoder.lm_head.layer_norm.weight
__snake_case = roberta.model.encoder.lm_head.layer_norm.bias
__snake_case = roberta.model.encoder.lm_head.weight
__snake_case = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__snake_case = roberta.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
__snake_case = model(snake_case__ )[0]
if classification_head:
__snake_case = roberta.model.classification_heads['mnli'](roberta.extract_features(snake_case__ ) )
else:
__snake_case = roberta.model(snake_case__ )[0]
print(our_output.shape , their_output.shape )
__snake_case = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__snake_case = torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
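# Example invocation (a sketch; the script file name and the paths are
# placeholders, not taken from the original repository):
#
#   python convert_xlm_roberta_xl_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlm-roberta-xl \
#       --pytorch_dump_folder_path /path/to/output \
#       --classification_head   # optional: also convert the MNLI head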
| 676 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you can try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per node for torch.distributed!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def A ( *snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(snake_case__ , 'r' ) as fh:
fcntl.flock(snake_case__ , fcntl.LOCK_EX )
try:
print(*snake_case__ )
finally:
fcntl.flock(snake_case__ , fcntl.LOCK_UN )
UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
UpperCAmelCase__ : Any = torch.device("cuda", local_rank)
UpperCAmelCase__ : Union[str, Any] = socket.gethostname()
UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
UpperCAmelCase__ : Optional[int] = dist.get_rank()
UpperCAmelCase__ : List[str] = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 676 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
UpperCAmelCase__ : List[str] = True
from torch.cuda.amp import autocast
UpperCAmelCase__ : Optional[int] = logging.getLogger(__name__)
def A ( snake_case__ : Dict=None , snake_case__ : Optional[Any]=None ) -> Optional[Any]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=snake_case__ )
@dataclass
class __lowercase :
__UpperCAmelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__UpperCAmelCase = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
__UpperCAmelCase = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
__UpperCAmelCase = field(
default=0.1 , metadata={
'''help''': '''The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'''
} , )
__UpperCAmelCase = field(
default=0.1 , metadata={'''help''': '''The dropout probability for all 1D convolutional layers in the feature extractor.'''} , )
__UpperCAmelCase = field(
default=0.05 , metadata={
'''help''': (
'''Probability of each feature vector along the time axis to be chosen as the start of the vector'''
'''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
'''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
)
} , )
__UpperCAmelCase = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class __lowercase :
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase = field(
default='''train+validation''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of validation examples to this '''
'''value if set.'''
)
} , )
__UpperCAmelCase = list_field(
default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class __lowercase :
__UpperCAmelCase = 42
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , lowercase_) -> Dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lengths and need
# different padding methods
__snake_case = [{'input_values': feature['input_values']} for feature in features]
__snake_case = [{'input_ids': feature['labels']} for feature in features]
__snake_case = self.processor.pad(
lowercase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
__snake_case = self.processor.pad(
labels=lowercase_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
__snake_case = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1) , -1_0_0)
__snake_case = labels
return batch
class __lowercase ( lowerCamelCase__ ):
def _a ( self , lowercase_ , lowercase_) -> torch.Tensor:
model.train()
__snake_case = self._prepare_inputs(lowercase_)
if self.use_amp:
with autocast():
__snake_case = self.compute_loss(lowercase_ , lowercase_)
else:
__snake_case = self.compute_loss(lowercase_ , lowercase_)
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__snake_case = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__snake_case = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(F"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
__snake_case = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowercase_).backward()
elif self.use_apex:
with amp.scale_loss(lowercase_ , self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowercase_)
else:
loss.backward()
return loss.detach()
def A ( ) -> str:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__snake_case = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , snake_case__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__snake_case = datasets.load_dataset(
'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
__snake_case = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
# Create and save tokenizer
__snake_case = f"[{''.join(data_args.chars_to_ignore )}]"
def remove_special_characters(snake_case__ : str ):
__snake_case = re.sub(snake_case__ , '' , batch['sentence'] ).lower() + ' '
return batch
__snake_case = train_dataset.map(snake_case__ , remove_columns=['sentence'] )
__snake_case = eval_dataset.map(snake_case__ , remove_columns=['sentence'] )
def extract_all_chars(snake_case__ : Optional[Any] ):
__snake_case = ' '.join(batch['text'] )
__snake_case = list(set(snake_case__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
__snake_case = train_dataset.map(
snake_case__ , batched=snake_case__ , batch_size=-1 , keep_in_memory=snake_case__ , remove_columns=train_dataset.column_names , )
__snake_case = train_dataset.map(
snake_case__ , batched=snake_case__ , batch_size=-1 , keep_in_memory=snake_case__ , remove_columns=eval_dataset.column_names , )
__snake_case = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
__snake_case = {v: k for k, v in enumerate(snake_case__ )}
__snake_case = vocab_dict[' ']
del vocab_dict[" "]
__snake_case = len(snake_case__ )
__snake_case = len(snake_case__ )
with open('vocab.json' , 'w' ) as vocab_file:
json.dump(snake_case__ , snake_case__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case = WavaVecaCTCTokenizer(
'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
__snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0.0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ )
__snake_case = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
__snake_case = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__snake_case = min(len(snake_case__ ) , data_args.max_train_samples )
__snake_case = train_dataset.select(range(snake_case__ ) )
if data_args.max_val_samples is not None:
__snake_case = eval_dataset.select(range(data_args.max_val_samples ) )
__snake_case = torchaudio.transforms.Resample(4_8000 , 1_6000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(snake_case__ : List[str] ):
__snake_case , __snake_case = torchaudio.load(batch['path'] )
__snake_case = resampler(snake_case__ ).squeeze().numpy()
__snake_case = 1_6000
__snake_case = batch['text']
return batch
__snake_case = train_dataset.map(
snake_case__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__snake_case = eval_dataset.map(
snake_case__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(snake_case__ : Tuple ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
__snake_case = processor(
audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
batch.update(snake_case__ )
return batch
__snake_case = train_dataset.map(
snake_case__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , )
__snake_case = eval_dataset.map(
snake_case__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
__snake_case = datasets.load_metric('wer' )
def compute_metrics(snake_case__ : int ):
__snake_case = pred.predictions
__snake_case = np.argmax(snake_case__ , axis=-1 )
__snake_case = processor.tokenizer.pad_token_id
__snake_case = processor.batch_decode(snake_case__ )
# we do not want to group tokens when computing the metrics
__snake_case = processor.batch_decode(pred.label_ids , group_tokens=snake_case__ )
__snake_case = wer_metric.compute(predictions=snake_case__ , references=snake_case__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__snake_case = DataCollatorCTCWithPadding(processor=snake_case__ , padding=snake_case__ )
# Initialize our Trainer
__snake_case = CTCTrainer(
model=snake_case__ , data_collator=snake_case__ , args=snake_case__ , compute_metrics=snake_case__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__snake_case = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__snake_case = model_args.model_name_or_path
else:
__snake_case = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__snake_case = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
__snake_case = train_result.metrics
__snake_case = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case__ )
)
__snake_case = min(snake_case__ , len(snake_case__ ) )
trainer.log_metrics('train' , snake_case__ )
trainer.save_metrics('train' , snake_case__ )
trainer.save_state()
# Evaluation
__snake_case = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case = trainer.evaluate()
__snake_case = data_args.max_val_samples if data_args.max_val_samples is not None else len(snake_case__ )
__snake_case = min(snake_case__ , len(snake_case__ ) )
trainer.log_metrics('eval' , snake_case__ )
trainer.save_metrics('eval' , snake_case__ )
return results
if __name__ == "__main__":
main()
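# Example invocation (a sketch; the script name and argument values are
# assumptions for illustration, not taken from the original file):
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-large-xlsr-turkish \
#       --num_train_epochs 30 --per_device_train_batch_size 16 \
#       --fp16 --do_train --do_eval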
| 676 |
from datetime import datetime
import requests
def A ( snake_case__ : str ) -> bytes:
'''simple docstring'''
__snake_case = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
__snake_case = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(snake_case__ ).content
if __name__ == "__main__":
UpperCAmelCase__ : Dict = input("Enter Video/IGTV url: ").strip()
UpperCAmelCase__ : Optional[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ : Union[str, Any] = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 676 | 1 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_in_hook(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))
        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_in_hook_with_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
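

# A minimal sketch of a custom hook (added for illustration, not part of the original
# test-suite): subclass ModelHook and override pre_forward/post_forward, exactly as the
# Pre/PostForwardHook test helpers above do. The name `ShapeLoggingHook` is hypothetical.
class ShapeLoggingHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # Runs before the wrapped forward; may rewrite args/kwargs.
        print(f"{module.__class__.__name__} input shape: {args[0].shape}")
        return args, kwargs

    def post_forward(self, module, output):
        # Runs after the wrapped forward; may rewrite the output.
        print(f"{module.__class__.__name__} output shape: {output.shape}")
        return output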
| 676 |
def is_automorphic_number(number: int) -> bool:
    """Return True if ``number`` is automorphic, i.e. its square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
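

# Worked examples (added for illustration): 25 is automorphic because 25**2 = 625 ends
# in "25", while 7 is not because 7**2 = 49 does not end in "7".
assert is_automorphic_number(25) is True
assert is_automorphic_number(7) is False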
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
UpperCAmelCase__ : List[str] = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )
    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)
    @staticmethod
    def safemembers(members, trusted_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(trusted_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
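

# A minimal usage sketch (added for illustration, not part of the original module):
# infer an archive's format from its magic number, then extract it. The archive and
# output paths below are hypothetical placeholders.
if __name__ == "__main__":
    archive_path = "/tmp/example.tar.gz"  # hypothetical input archive
    extracted_dir = "/tmp/example_extracted"  # hypothetical output directory
    archive_format = Extractor.infer_extractor_format(archive_path)
    if archive_format:
        Extractor.extract(archive_path, extracted_dir, extractor_format=archive_format)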
| 676 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Map each element of the vector into the range (0, 1)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid linear unit (SiLU, also called swish): x * sigmoid(x), element-wise."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
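
    # Quick demonstration (added for illustration; not part of the original module):
    # sigmoid maps 0 to 0.5, and the sigmoid linear unit is x * sigmoid(x).
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # ~ [0.2689, 0.5, 0.7311]
    print(sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])))  # ~ [-0.2689, 0.0, 0.7311]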
| 676 | 1 |
def solution(n: int = 1000) -> int:
    """Count how many of the first ``n`` continued-fraction expansions of sqrt(2)
    have a numerator with more digits than the denominator (Project Euler 57)."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(F"""{solution() = }""")
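    # Worked example (added for illustration): the expansions evolve as 3/2, 7/5, 17/12,
    # 41/29, ... via numerator' = numerator + 2 * denominator and
    # denominator' = numerator + denominator; the 8th expansion, 1393/985, is the first
    # whose numerator gains an extra digit, so among the first 8 there is exactly one hit.
    assert solution(8) == 1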
| 676 |
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime (p must be a prime >= 2)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
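
    # Worked example (added for illustration): for p = 3, s starts at 4 and
    # m = 2**3 - 1 = 7; a single iteration gives (4 * 4 - 2) % 7 = 0, so 7 is a
    # Mersenne prime.
    print(lucas_lehmer_test(3))  # True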
| 676 | 1 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMSN does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 676 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
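
# Illustration (added; not part of the transformers API): _LazyModule defers the heavy
# submodule imports above until an attribute is first accessed. A stripped-down,
# hypothetical equivalent of the idea:
import importlib


class _LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only when one of its symbols is requested.
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(f".{submodule}", self._name), attr)
        raise AttributeError(attr)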
| 676 | 1 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Estimate the dominant eigenvalue and eigenvector of ``input_matrix`` by power iteration."""
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
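
    # Minimal usage sketch (added for illustration): estimate the dominant eigenvalue of
    # a small symmetric matrix; for [[2, 1], [1, 2]] the iteration converges to 3.
    demo_value, demo_vector = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
    print(demo_value)  # ~ 3.0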
| 676 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
@slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 676 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def A ( snake_case__ : str , snake_case__ : List[Any] ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[4] )
__snake_case = config.embed_dim
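            # The fused qkv projection stacks query, key and value along dim 0, so each
            # chunk below has size embed_dim (q: [:dim], k: [dim:2*dim], v: [-dim:]).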
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
else:
__snake_case = val
return orig_state_dict
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int ) -> Tuple:
'''simple docstring'''
__snake_case = get_config(snake_case__ )
__snake_case = SwinaSRForImageSuperResolution(snake_case__ )
model.eval()
__snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )
__snake_case = convert_state_dict(snake_case__ , snake_case__ )
__snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
__snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
__snake_case = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__snake_case = 126 if 'Jpeg' in checkpoint_url else 256
__snake_case = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case = transforms(snake_case__ ).unsqueeze(0 )
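    # Single-channel checkpoints (the JPEG artifact-removal model, num_channels == 1)
    # expect grayscale input, so only one channel of the RGB tensor is kept below.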
if config.num_channels == 1:
__snake_case = pixel_values[:, 0, :, :].unsqueeze(1 )
__snake_case = model(snake_case__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 )
print('Looks ok!' )
__snake_case = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__snake_case = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 676 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A ( snake_case__ : str ) -> None:
'''simple docstring'''
__snake_case , __snake_case = analyze_text(snake_case__ )
__snake_case = list(' ' + ascii_lowercase )
    # total count of single characters observed.
__snake_case = sum(single_char_strings.values() )
    # accumulator for the first-order (single-character) entropy.
__snake_case = 0
    # for each character of the alphabet, add its entropy contribution if it occurred.
for ch in my_alphas:
if ch in single_char_strings:
__snake_case = single_char_strings[ch]
__snake_case = my_str / all_sum
my_fir_sum += prob * math.loga(snake_case__ ) # entropy formula.
# print entropy
print(f"{round(-1 * my_fir_sum ):.1f}" )
    # total count of two-character sequences observed.
__snake_case = sum(two_char_strings.values() )
__snake_case = 0
    # for each two-character sequence over the alphabet, add its entropy contribution.
for cha in my_alphas:
        for chb in my_alphas:
            __snake_case = cha + chb
if sequence in two_char_strings:
__snake_case = two_char_strings[sequence]
__snake_case = int(snake_case__ ) / all_sum
my_sec_sum += prob * math.loga(snake_case__ )
# print second entropy
print(f"{round(-1 * my_sec_sum ):.1f}" )
# print the difference between them
print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" )
def A ( snake_case__ : str ) -> tuple[dict, dict]:
'''simple docstring'''
__snake_case = Counter() # type: ignore
__snake_case = Counter() # type: ignore
single_char_strings[text[-1]] += 1
    # seed the pair counter with a leading space before the first character.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(snake_case__ ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
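# Example of the intended counting:
#   analyze_text("abb") -> (Counter({'b': 2, 'a': 1}), Counter({' a': 1, 'ab': 1, 'bb': 1}))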
def A ( ) -> Tuple:
'''simple docstring'''
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 676 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowercase :
def __init__( self , lowercase_ , lowercase_=2 , lowercase_=3 , lowercase_=4 , lowercase_=2 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=9_9 , lowercase_=3_6 , lowercase_=3 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=6 , lowercase_=6 , lowercase_=3 , lowercase_=4 , lowercase_=None , lowercase_=1_0_0_0 , ) -> List[Any]:
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = patch_size
__snake_case = text_seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = coordinate_size
__snake_case = shape_size
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
__snake_case = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__snake_case = text_seq_length
__snake_case = (image_size // patch_size) ** 2 + 1
__snake_case = self.text_seq_length + self.image_seq_length
def _a ( self) -> Union[str, Any]:
__snake_case = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
__snake_case = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case = bbox[i, j, 3]
__snake_case = bbox[i, j, 1]
__snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case = bbox[i, j, 2]
__snake_case = bbox[i, j, 0]
__snake_case = t
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.text_seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
__snake_case = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Dict:
__snake_case = LayoutLMvaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
# text + image
__snake_case = model(lowercase_ , pixel_values=lowercase_)
__snake_case = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_)
__snake_case = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , token_type_ids=lowercase_)
__snake_case = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# text only
__snake_case = model(lowercase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
__snake_case = model(pixel_values=lowercase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Union[str, Any]:
__snake_case = self.num_labels
__snake_case = LayoutLMvaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str:
__snake_case = self.num_labels
__snake_case = LayoutLMvaForTokenClassification(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str:
__snake_case = LayoutLMvaForQuestionAnswering(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _a ( self) -> str:
__snake_case = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> str:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self) -> Tuple:
__snake_case = LayoutLMvaModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self , lowercase_ , lowercase_ , lowercase_=False) -> Union[str, Any]:
__snake_case = copy.deepcopy(lowercase_)
if model_class in get_values(lowercase_):
__snake_case = {
k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous()
if isinstance(lowercase_ , torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowercase_):
__snake_case = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
elif model_class in get_values(lowercase_):
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
elif model_class in [
*get_values(lowercase_),
]:
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
elif model_class in [
*get_values(lowercase_),
]:
__snake_case = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowercase_ , )
return inputs_dict
def _a ( self) -> Any:
self.config_tester.run_common_tests()
def _a ( self) -> Dict:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> List[str]:
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_)
def _a ( self) -> List[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_)
def _a ( self) -> Any:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_)
@slow
def _a ( self) -> Optional[int]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = LayoutLMvaModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def A ( ) -> Tuple:
'''simple docstring'''
__snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class __lowercase ( unittest.TestCase ):
@cached_property
def _a ( self) -> Dict:
return LayoutLMvaImageProcessor(apply_ocr=lowercase_) if is_vision_available() else None
@slow
def _a ( self) -> Any:
__snake_case = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(lowercase_)
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=lowercase_ , return_tensors='pt').pixel_values.to(lowercase_)
__snake_case = torch.tensor([[1, 2]])
__snake_case = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
# forward pass
__snake_case = model(
input_ids=input_ids.to(lowercase_) , bbox=bbox.to(lowercase_) , pixel_values=pixel_values.to(lowercase_) , )
# verify the logits
__snake_case = torch.Size((1, 1_9_9, 7_6_8))
self.assertEqual(outputs.last_hidden_state.shape , lowercase_)
__snake_case = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4))
| 676 |
from __future__ import annotations
class __lowercase :
def __init__( self , lowercase_) -> None:
__snake_case = data
__snake_case = None
__snake_case = None
def A ( snake_case__ : Node | None ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def A ( snake_case__ : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def A ( snake_case__ : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
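# A full binary tree is one in which every node has either zero or two children;
# a single node counts as full, while any node with exactly one child does not.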
def A ( ) -> None: # Main function for testing.
'''simple docstring'''
__snake_case = Node(1 )
__snake_case = Node(2 )
__snake_case = Node(3 )
__snake_case = Node(4 )
__snake_case = Node(5 )
__snake_case = Node(6 )
__snake_case = Node(7 )
__snake_case = Node(8 )
__snake_case = Node(9 )
print(is_full_binary_tree(snake_case__ ) )
print(depth_of_tree(snake_case__ ) )
print('Tree is: ' )
display(snake_case__ )
if __name__ == "__main__":
main()
| 676 | 1 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def A ( snake_case__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return x + 2
class __lowercase ( unittest.TestCase ):
def _a ( self) -> Tuple:
__snake_case = 'x = 3'
__snake_case = {}
__snake_case = evaluate(lowercase_ , {} , state=lowercase_)
assert result == 3
self.assertDictEqual(lowercase_ , {'x': 3})
__snake_case = 'x = y'
__snake_case = {'y': 5}
__snake_case = evaluate(lowercase_ , {} , state=lowercase_)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowercase_ , {'x': 5, 'y': 5})
def _a ( self) -> List[Any]:
__snake_case = 'y = add_two(x)'
__snake_case = {'x': 3}
__snake_case = evaluate(lowercase_ , {'add_two': add_two} , state=lowercase_)
assert result == 5
self.assertDictEqual(lowercase_ , {'x': 3, 'y': 5})
# Won't work without the tool
with CaptureStdout() as out:
__snake_case = evaluate(lowercase_ , {} , state=lowercase_)
assert result is None
assert "tried to execute add_two" in out.out
def _a ( self) -> str:
__snake_case = 'x = 3'
__snake_case = {}
__snake_case = evaluate(lowercase_ , {} , state=lowercase_)
assert result == 3
self.assertDictEqual(lowercase_ , {'x': 3})
def _a ( self) -> int:
__snake_case = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
__snake_case = {'x': 3}
__snake_case = evaluate(lowercase_ , {'add_two': add_two} , state=lowercase_)
self.assertDictEqual(lowercase_ , {'x': 3, 'y': 5})
self.assertDictEqual(lowercase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}})
def _a ( self) -> List[Any]:
__snake_case = 'x = 3\ny = 5'
__snake_case = {}
__snake_case = evaluate(lowercase_ , {} , state=lowercase_)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowercase_ , {'x': 3, 'y': 5})
def _a ( self) -> Optional[Any]:
__snake_case = 'text = f\'This is x: {x}.\''
__snake_case = {'x': 3}
__snake_case = evaluate(lowercase_ , {} , state=lowercase_)
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(lowercase_ , {'x': 3, 'text': 'This is x: 3.'})
def _a ( self) -> Optional[int]:
__snake_case = 'if x <= 3:\n y = 2\nelse:\n y = 5'
__snake_case = {'x': 3}
__snake_case = evaluate(lowercase_ , {} , state=lowercase_)
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(lowercase_ , {'x': 3, 'y': 2})
__snake_case = {'x': 8}
__snake_case = evaluate(lowercase_ , {} , state=lowercase_)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowercase_ , {'x': 8, 'y': 5})
def _a ( self) -> Union[str, Any]:
__snake_case = 'test_list = [x, add_two(x)]'
__snake_case = {'x': 3}
__snake_case = evaluate(lowercase_ , {'add_two': add_two} , state=lowercase_)
self.assertListEqual(lowercase_ , [3, 5])
self.assertDictEqual(lowercase_ , {'x': 3, 'test_list': [3, 5]})
def _a ( self) -> Optional[Any]:
__snake_case = 'y = x'
__snake_case = {'x': 3}
__snake_case = evaluate(lowercase_ , {} , state=lowercase_)
assert result == 3
self.assertDictEqual(lowercase_ , {'x': 3, 'y': 3})
def _a ( self) -> Optional[int]:
__snake_case = 'test_list = [x, add_two(x)]\ntest_list[1]'
__snake_case = {'x': 3}
__snake_case = evaluate(lowercase_ , {'add_two': add_two} , state=lowercase_)
assert result == 5
self.assertDictEqual(lowercase_ , {'x': 3, 'test_list': [3, 5]})
__snake_case = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
__snake_case = {'x': 3}
__snake_case = evaluate(lowercase_ , {'add_two': add_two} , state=lowercase_)
assert result == 5
self.assertDictEqual(lowercase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}})
def _a ( self) -> List[str]:
__snake_case = 'x = 0\nfor i in range(3):\n x = i'
__snake_case = {}
__snake_case = evaluate(lowercase_ , {'range': range} , state=lowercase_)
assert result == 2
self.assertDictEqual(lowercase_ , {'x': 2, 'i': 2})
| 676 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''table-transformer'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=1_0_0 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=2_5_6 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__snake_case = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(lowercase_ , lowercase_):
__snake_case = backbone_config.get('model_type')
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(lowercase_)
# set timm attributes to None
__snake_case , __snake_case , __snake_case = None, None, None
__snake_case = use_timm_backbone
__snake_case = backbone_config
__snake_case = num_channels
__snake_case = num_queries
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = init_xavier_std
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = encoder_layers
__snake_case = auxiliary_loss
__snake_case = position_embedding_type
__snake_case = backbone
__snake_case = use_pretrained_backbone
__snake_case = dilation
# Hungarian matcher
__snake_case = class_cost
__snake_case = bbox_cost
__snake_case = giou_cost
# Loss coefficients
__snake_case = mask_loss_coefficient
__snake_case = dice_loss_coefficient
__snake_case = bbox_loss_coefficient
__snake_case = giou_loss_coefficient
__snake_case = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def _a ( self) -> int:
return self.encoder_attention_heads
@property
def _a ( self) -> int:
return self.d_model
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def _a ( self) -> float:
return 1e-5
@property
def _a ( self) -> int:
return 1_2
| 676 | 1 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 676 |
from maths.prime_check import is_prime
def A ( snake_case__ : int ) -> int:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
__snake_case = f"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if is_prime(snake_case__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
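# Example: an input of 3 returns 5 (3 and 5 are twin primes), while an input of 4
# returns -1 because 4 is not prime.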
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def A ( snake_case__ : List[Any] ) -> Any:
'''simple docstring'''
__snake_case = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__snake_case = 4
__snake_case = 48
__snake_case = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = [6, 6, 6, 6]
__snake_case = 60
__snake_case = [6, 6, 6, 6]
__snake_case = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = 4
__snake_case = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__snake_case = 1
__snake_case = 1
__snake_case = 126
__snake_case = 7
__snake_case = 255.0
__snake_case = ''
return config
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
__snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__snake_case = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__snake_case = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "conv_first" in name:
__snake_case = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__snake_case = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__snake_case = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__snake_case = name.replace('upsample.2' , 'upsample.convolution_1' )
__snake_case = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__snake_case = 'swin2sr.' + name
return name
def A ( snake_case__ : str , snake_case__ : List[Any] ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[4] )
__snake_case = config.embed_dim
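            # The fused qkv projection stacks query, key and value along dim 0, so each
            # chunk below has size embed_dim (q: [:dim], k: [dim:2*dim], v: [-dim:]).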
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
else:
__snake_case = val
return orig_state_dict
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int ) -> Tuple:
'''simple docstring'''
__snake_case = get_config(snake_case__ )
__snake_case = SwinaSRForImageSuperResolution(snake_case__ )
model.eval()
__snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )
__snake_case = convert_state_dict(snake_case__ , snake_case__ )
__snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
__snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
__snake_case = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__snake_case = 126 if 'Jpeg' in checkpoint_url else 256
__snake_case = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case = transforms(snake_case__ ).unsqueeze(0 )
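    # Single-channel checkpoints (the JPEG artifact-removal model, num_channels == 1)
    # expect grayscale input, so only one channel of the RGB tensor is kept below.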
if config.num_channels == 1:
__snake_case = pixel_values[:, 0, :, :].unsqueeze(1 )
__snake_case = model(snake_case__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 )
print('Looks ok!' )
__snake_case = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__snake_case = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 676 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def A ( snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Any ) -> Optional[int]:
'''simple docstring'''
__snake_case = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}"
| 676 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def A ( snake_case__ : Optional[int] , snake_case__ : Any=0.999 , snake_case__ : str="cosine" , ) -> Union[str, Any]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ : List[str] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ : str ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
__snake_case = []
for i in range(snake_case__ ):
__snake_case = i / num_diffusion_timesteps
__snake_case = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) )
return torch.tensor(snake_case__ , dtype=torch.floataa )
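# Cosine/exp alpha-bar schedule from "Improved Denoising Diffusion Probabilistic
# Models" (Nichol & Dhariwal, 2021): beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i),
# clipped by the max-beta argument to avoid singularities at the end of the schedule.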
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ ):
__UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase = 2
@register_to_config
def __init__( self , lowercase_ = 1_0_0_0 , lowercase_ = 0.0_0085 , lowercase_ = 0.012 , lowercase_ = "linear" , lowercase_ = None , lowercase_ = "epsilon" , lowercase_ = False , lowercase_ = False , lowercase_ = 1.0 , lowercase_ = "linspace" , lowercase_ = 0 , ) -> List[str]:
if trained_betas is not None:
__snake_case = torch.tensor(lowercase_ , dtype=torch.floataa)
elif beta_schedule == "linear":
__snake_case = torch.linspace(lowercase_ , lowercase_ , lowercase_ , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__snake_case = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowercase_ , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__snake_case = betas_for_alpha_bar(lowercase_ , alpha_transform_type='cosine')
elif beta_schedule == "exp":
__snake_case = betas_for_alpha_bar(lowercase_ , alpha_transform_type='exp')
else:
            raise NotImplementedError(F"{beta_schedule} is not implemented for {self.__class__}")
__snake_case = 1.0 - self.betas
__snake_case = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(lowercase_ , lowercase_ , lowercase_)
__snake_case = use_karras_sigmas
def _a ( self , lowercase_ , lowercase_=None) -> Union[str, Any]:
if schedule_timesteps is None:
__snake_case = self.timesteps
__snake_case = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
__snake_case = 1 if len(lowercase_) > 1 else 0
else:
__snake_case = timestep.cpu().item() if torch.is_tensor(lowercase_) else timestep
__snake_case = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _a ( self) -> List[str]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _a ( self , lowercase_ , lowercase_ , ) -> torch.FloatTensor:
__snake_case = self.index_for_timestep(lowercase_)
__snake_case = self.sigmas[step_index]
__snake_case = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , ) -> str:
__snake_case = num_inference_steps
__snake_case = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__snake_case = np.linspace(0 , num_train_timesteps - 1 , lowercase_ , dtype=lowercase_)[::-1].copy()
elif self.config.timestep_spacing == "leading":
__snake_case = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(0 , lowercase_) * step_ratio).round()[::-1].copy().astype(lowercase_)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__snake_case = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(lowercase_ , 0 , -step_ratio)).round().copy().astype(lowercase_)
timesteps -= 1
else:
raise ValueError(
F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")
__snake_case = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
__snake_case = np.log(lowercase_)
__snake_case = np.interp(lowercase_ , np.arange(0 , len(lowercase_)) , lowercase_)
if self.config.use_karras_sigmas:
__snake_case = self._convert_to_karras(in_sigmas=lowercase_ , num_inference_steps=self.num_inference_steps)
__snake_case = np.array([self._sigma_to_t(lowercase_ , lowercase_) for sigma in sigmas])
__snake_case = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
__snake_case = torch.from_numpy(lowercase_).to(device=lowercase_)
__snake_case = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
__snake_case = torch.from_numpy(lowercase_)
__snake_case = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
if str(lowercase_).startswith('mps'):
# mps does not support float64
__snake_case = timesteps.to(lowercase_ , dtype=torch.floataa)
else:
__snake_case = timesteps.to(device=lowercase_)
# empty dt and derivative
__snake_case = None
__snake_case = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__snake_case = defaultdict(lowercase_)
def _a ( self , lowercase_ , lowercase_) -> List[str]:
# get log sigma
__snake_case = np.log(lowercase_)
# get distribution
__snake_case = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__snake_case = np.cumsum((dists >= 0) , axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
__snake_case = low_idx + 1
__snake_case = log_sigmas[low_idx]
__snake_case = log_sigmas[high_idx]
# interpolate sigmas
__snake_case = (low - log_sigma) / (low - high)
__snake_case = np.clip(lowercase_ , 0 , 1)
# transform interpolation to time range
__snake_case = (1 - w) * low_idx + w * high_idx
__snake_case = t.reshape(sigma.shape)
return t
def _a ( self , lowercase_ , lowercase_) -> torch.FloatTensor:
__snake_case = in_sigmas[-1].item()
__snake_case = in_sigmas[0].item()
__snake_case = 7.0 # 7.0 is the value used in the paper
__snake_case = np.linspace(0 , 1 , lowercase_)
__snake_case = sigma_min ** (1 / rho)
__snake_case = sigma_max ** (1 / rho)
__snake_case = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
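    # Karras et al. (2022) sigma schedule: sigma_i = (sigma_max^(1/rho) + ramp_i *
    # (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho with rho = 7, spacing the noise
    # levels more densely near sigma_min.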
@property
def _a ( self) -> Tuple:
return self.dt is None
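    # The scheduler alternates between a first-order Euler step (when dt is None) and
    # a second-order Heun correction that averages the current and stored derivatives.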
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ) -> Union[SchedulerOutput, Tuple]:
__snake_case = self.index_for_timestep(lowercase_)
# advance index counter by 1
__snake_case = timestep.cpu().item() if torch.is_tensor(lowercase_) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__snake_case = self.sigmas[step_index]
__snake_case = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__snake_case = self.sigmas[step_index - 1]
__snake_case = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__snake_case = 0
__snake_case = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__snake_case = sigma_hat if self.state_in_first_order else sigma_next
__snake_case = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__snake_case = sigma_hat if self.state_in_first_order else sigma_next
__snake_case = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__snake_case = model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")
if self.config.clip_sample:
__snake_case = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range)
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__snake_case = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__snake_case = sigma_next - sigma_hat
# store for 2nd order step
__snake_case = derivative
__snake_case = dt
__snake_case = sample
else:
# 2. 2nd order / Heun's method
__snake_case = (sample - pred_original_sample) / sigma_next
__snake_case = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__snake_case = self.dt
__snake_case = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__snake_case = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(lowercase_):
# mps does not support float64
__snake_case = self.timesteps.to(original_samples.device , dtype=torch.floataa)
__snake_case = timesteps.to(original_samples.device , dtype=torch.floataa)
else:
__snake_case = self.timesteps.to(original_samples.device)
__snake_case = timesteps.to(original_samples.device)
__snake_case = [self.index_for_timestep(lowercase_ , lowercase_) for t in timesteps]
__snake_case = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
__snake_case = sigma.unsqueeze(-1)
__snake_case = original_samples + noise * sigma
return noisy_samples
def __len__( self) -> Tuple:
return self.config.num_train_timesteps
| 676 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def A ( snake_case__ : List[Any] ) -> str:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
__snake_case = k.replace(snake_case__ , snake_case__ )
return k
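# Example rewrite, applying the PATTERNS pairs in order:
#   "decoder/memory_attention/output_proj/kernel" -> "decoder.encoder_attn.out_proj.weight"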
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
__snake_case = tf.train.list_variables(snake_case__ )
__snake_case = {}
__snake_case = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
__snake_case = any(pat in name for pat in ignore_name )
if skip_key:
continue
__snake_case = tf.train.load_variable(snake_case__ , snake_case__ )
__snake_case = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    '''simple docstring'''
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
UpperCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ : int = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase__ : str = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
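A hedged invocation sketch for the script above; the checkpoint path and output directory are placeholders, not files shipped with this repository.

# From the shell (paths are illustrative):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc
# Or programmatically:
#   convert_pegasus_ckpt_to_pytorch("./ckpt/aeslc/model.ckpt-32000", "pegasus/aeslc")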
| 676 | 1 |
def all_unique_chars(input_str: str) -> bool:
    '''simple docstring'''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
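    # Illustrative checks (not part of the original module; the function name is a
    # descriptive choice, since the source had it obfuscated to a placeholder):
    assert all_unique_chars("abcdef") is True  # no repeated code points
    assert all_unique_chars("aabc") is False  # 'a' repeats, its bit is already set
    # The bitmap is an arbitrary-precision int used as a bit set over code points,
    # giving one test-and-set per character, i.e. a single O(n) pass.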
| 700 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
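A hedged usage sketch for the pipeline above; the checkpoint id, community-pipeline name, and waveform variable are assumptions, so they are kept as comments rather than presented as runnable facts about the Hub.

# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5",
#     custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline id
# )
# image = pipe(audio=waveform, sampling_rate=16_000).images[0]  # waveform: 1-D float array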
| 676 | 0 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    '''simple docstring'''
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    '''simple docstring'''
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def A ( snake_case__ : Dict ) -> Any:
'''simple docstring'''
# Test when on a single CPU or GPU that the context manager does nothing
__snake_case , __snake_case , __snake_case = get_training_setup(__A )
# Use a single batch
__snake_case , __snake_case = next(iter(__A ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__A , __A , __A , __A )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__A ):
step_model(__A , __A , __A , __A )
else:
# Sync grads
step_model(__A , __A , __A , __A )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__A , __A , __A , __A )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__snake_case = ddp_input[torch.randperm(len(__A ) )]
def A ( snake_case__ : Dict ) -> Optional[int]:
'''simple docstring'''
# Test on distributed setup that context manager behaves properly
__snake_case , __snake_case , __snake_case = get_training_setup(__A )
# Use a single batch
__snake_case , __snake_case = next(iter(__A ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__A , __A , __A , __A )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__A ):
step_model(__A , __A , __A , __A )
else:
# Sync grads
step_model(__A , __A , __A , __A )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__snake_case = ddp_input[torch.randperm(len(__A ) )]
def A ( snake_case__ : List[Any]=False , snake_case__ : Optional[Any]=False ) -> int:
'''simple docstring'''
__snake_case = Accelerator(
split_batches=__A , dispatch_batches=__A , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__snake_case , __snake_case , __snake_case = get_training_setup(__A )
for iteration, batch in enumerate(__A ):
__snake_case , __snake_case = batch.values()
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__A , __A , __A , __A , __A )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__A ):
step_model(__A , __A , __A , __A )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__A ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__snake_case = ddp_input[torch.randperm(len(__A ) )]
GradientState._reset_state()
def A ( snake_case__ : Union[str, Any]=False , snake_case__ : Dict=False ) -> List[Any]:
'''simple docstring'''
__snake_case = Accelerator(
split_batches=__A , dispatch_batches=__A , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = get_training_setup(__A , __A )
for iteration, batch in enumerate(__A ):
__snake_case , __snake_case = batch.values()
# Gather the distributed inputs and targs for the base model
__snake_case , __snake_case = accelerator.gather((ddp_input, ddp_target) )
__snake_case , __snake_case = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__A , __A , __A , __A , __A )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__A )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__A ):
step_model(__A , __A , __A , __A )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
__snake_case = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__A ))
if accelerator.num_processes > 1:
check_model_parameters(__A , __A , __A , __A )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def A ( ) -> int:
'''simple docstring'''
__snake_case = Accelerator()
__snake_case = RegressionDataset(length=80 )
__snake_case = DataLoader(__A , batch_size=16 )
__snake_case = RegressionDataset(length=96 )
__snake_case = DataLoader(__A , batch_size=16 )
__snake_case , __snake_case = accelerator.prepare(__A , __A )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__A ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__A )
if iteration < len(__A ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__A ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__A )
if batch_num < len(__A ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def A ( ) -> Dict:
'''simple docstring'''
__snake_case = Accelerator()
__snake_case = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(__A )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(__A )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation(__A , __A )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation_with_opt_and_scheduler(__A , __A )
def A ( snake_case__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
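For reference, the core pattern these tests exercise is the accumulate context manager; below is a minimal self-contained sketch with a toy linear model, not the test harness above.

from accelerate import Accelerator
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataset = TensorDataset(torch.randn(32, 4), torch.randn(32, 1))
dataloader = DataLoader(dataset, batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
for x, y in dataloader:
    with accelerator.accumulate(model):  # grads sync only on accumulation boundaries
        loss = F.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()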
| 701 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]:
__snake_case = gen_kwargs.copy()
__snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
__snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
__snake_case = gen_kwargs
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(lowercase_)
__snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_)
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
else:
__snake_case = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_)
return metrics
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]:
__snake_case = gen_kwargs.copy()
__snake_case = self.get_test_dataloader(lowercase_)
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict')
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
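A hedged call sketch; `trainer` stands in for an instance of the class above, constructed with `eval_examples` and a `post_process_function`, and the generation kwargs are illustrative.

# metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")
# results = trainer.predict(test_dataset, test_examples, metric_key_prefix="test")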
| 676 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def A ( snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : str , snake_case__ : Dict=True , snake_case__ : Optional[Any]="pt" ) -> str:
'''simple docstring'''
__snake_case = {'add_prefix_space': True} if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not line.startswith(' ' ) else {}
__snake_case = padding_side
return tokenizer(
[line] , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' if pad_to_max_length else None , truncation=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def A ( snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : str=None , ) -> Any:
'''simple docstring'''
__snake_case = input_ids.ne(_SCREAMING_SNAKE_CASE ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
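# Quick illustration of trim_batch (tensor values invented for the example):
#   batch = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])  # 0 plays the pad id
#   trim_batch(batch, pad_token_id=0)
#   -> tensor([[5, 6],
#              [7, 0]])  # the two all-pad columns are dropped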
class __lowercase ( snake_case__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_="train" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="" , ) -> Tuple:
super().__init__()
__snake_case = Path(UpperCAmelCase_).joinpath(type_path + '.source')
__snake_case = Path(UpperCAmelCase_).joinpath(type_path + '.target')
__snake_case = self.get_char_lens(self.src_file)
__snake_case = max_source_length
__snake_case = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
__snake_case = tokenizer
__snake_case = prefix
if n_obs is not None:
__snake_case = self.src_lens[:n_obs]
__snake_case = src_lang
__snake_case = tgt_lang
def __len__( self) -> List[Any]:
return len(self.src_lens)
def __getitem__( self , lowercase_) -> Dict[str, torch.Tensor]:
__snake_case = index + 1 # linecache starts at 1
__snake_case = self.prefix + linecache.getline(str(self.src_file) , UpperCAmelCase_).rstrip('\n')
__snake_case = linecache.getline(str(self.tgt_file) , UpperCAmelCase_).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , UpperCAmelCase_):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__snake_case = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , UpperCAmelCase_) else self.tokenizer
)
__snake_case = self.tokenizer.generator if isinstance(self.tokenizer , UpperCAmelCase_) else self.tokenizer
__snake_case = encode_line(UpperCAmelCase_ , UpperCAmelCase_ , self.max_source_length , 'right')
__snake_case = encode_line(UpperCAmelCase_ , UpperCAmelCase_ , self.max_target_length , 'right')
__snake_case = source_inputs['input_ids'].squeeze()
__snake_case = target_inputs['input_ids'].squeeze()
__snake_case = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( lowercase_) -> List[str]:
return [len(UpperCAmelCase_) for x in Path(UpperCAmelCase_).open().readlines()]
def _a ( self , lowercase_) -> Dict[str, torch.Tensor]:
__snake_case = torch.stack([x['input_ids'] for x in batch])
__snake_case = torch.stack([x['attention_mask'] for x in batch])
__snake_case = torch.stack([x['decoder_input_ids'] for x in batch])
__snake_case = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , UpperCAmelCase_)
else self.tokenizer.pad_token_id
)
__snake_case = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , UpperCAmelCase_)
else self.tokenizer.pad_token_id
)
__snake_case = trim_batch(UpperCAmelCase_ , UpperCAmelCase_)
__snake_case , __snake_case = trim_batch(UpperCAmelCase_ , UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
__snake_case = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    '''simple docstring'''
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    '''simple docstring'''
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    '''simple docstring'''
    with open(path) as f:
        return json.load(f)


def get_git_info():
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    '''simple docstring'''
    return list(map(f, x))


def pickle_save(obj, path):
    '''simple docstring'''
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s) -> str:
    '''simple docstring'''

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    '''simple docstring'''
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    '''simple docstring'''
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    '''simple docstring'''
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 702 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    '''simple docstring'''
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
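    # Illustrative variant (an assumption, not part of the original script): with a
    # zero heuristic the same routine degrades to uniform-cost (Dijkstra-like) search.
    zero_heuristic = [[0 for _ in row] for row in grid]
    ucs_path, _ = search(grid, init, goal, cost, zero_heuristic)
    print(ucs_path)  # still an optimal path; cells are just expanded without guidance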
| 676 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=1_8 , lowercase_=3_0 , lowercase_=4_0_0 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , ) -> Tuple:
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size if size is not None else {'height': 1_8, 'width': 2_0}
__snake_case = do_thumbnail
__snake_case = do_align_axis
__snake_case = do_pad
__snake_case = do_normalize
__snake_case = image_mean
__snake_case = image_std
def _a ( self) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
def _a ( self) -> List[Any]:
__snake_case = DonutImageProcessingTester(self)
@property
def _a ( self) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self) -> int:
__snake_case = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(A_ , 'do_resize'))
self.assertTrue(hasattr(A_ , 'size'))
self.assertTrue(hasattr(A_ , 'do_thumbnail'))
self.assertTrue(hasattr(A_ , 'do_align_long_axis'))
self.assertTrue(hasattr(A_ , 'do_pad'))
self.assertTrue(hasattr(A_ , 'do_normalize'))
self.assertTrue(hasattr(A_ , 'image_mean'))
self.assertTrue(hasattr(A_ , 'image_std'))
def _a ( self) -> str:
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 2_0})
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2})
# Previous config had dimensions in (width, height) order
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=(4_2, 8_4))
self.assertEqual(image_processor.size , {'height': 8_4, 'width': 4_2})
def _a ( self) -> Optional[Any]:
pass
@is_flaky()
def _a ( self) -> int:
__snake_case = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_)
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image)
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__snake_case = image_processing(A_ , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def _a ( self) -> Any:
__snake_case = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_)
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray)
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__snake_case = image_processing(A_ , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def _a ( self) -> Union[str, Any]:
__snake_case = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_)
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor)
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__snake_case = image_processing(A_ , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
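A hedged sketch of the processor under test used directly; `pil_image` is a stand-in for any PIL image, and the shape comment assumes the default three channels.

# processor = DonutImageProcessor(size={"height": 18, "width": 20})
# pixel_values = processor(images=[pil_image], return_tensors="pt").pixel_values
# pixel_values.shape  # expected (1, 3, 18, 20)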
| 703 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
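For reference, a minimal standalone version of the DocTestSuite mechanism these helpers wrap; the toy function is invented for illustration.

import doctest
import unittest


def add(a, b):
    """
    >>> add(2, 3)
    5
    """
    return a + b


if __name__ == "__main__":
    suite = doctest.DocTestSuite()  # collects doctests from this module
    result = unittest.TextTestRunner().run(suite)
    assert len(result.failures) == 0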
| 676 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : int = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"
def __init__( self , lowercase_=1_4_0_8 , lowercase_=6_1_4_4 , lowercase_=3_9 , lowercase_=1_6 , lowercase_=2_2_4 , lowercase_=1_4 , lowercase_="gelu" , lowercase_=1e-6 , lowercase_=0.0 , lowercase_=1e-10 , lowercase_=True , **lowercase_ , ) -> Any:
super().__init__(**UpperCamelCase_)
__snake_case = hidden_size
__snake_case = intermediate_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = patch_size
__snake_case = image_size
__snake_case = initializer_range
__snake_case = attention_dropout
__snake_case = layer_norm_eps
__snake_case = hidden_act
__snake_case = qkv_bias
@classmethod
def _a ( cls , lowercase_ , **lowercase_) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase_)
__snake_case = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_)
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type') == "instructblip":
__snake_case = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"
def __init__( self , lowercase_=3_0_5_2_2 , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=0 , lowercase_="absolute" , lowercase_=2 , lowercase_=1_4_0_8 , **lowercase_ , ) -> Any:
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_)
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = cross_attention_frequency
__snake_case = encoder_hidden_size
@classmethod
def _a ( cls , lowercase_ , **lowercase_) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase_)
__snake_case = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_)
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type') == "instructblip":
__snake_case = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=3_2 , **lowercase_) -> List[str]:
super().__init__(**UpperCamelCase_)
if vision_config is None:
__snake_case = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
if qformer_config is None:
__snake_case = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
if text_config is None:
__snake_case = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
__snake_case = InstructBlipVisionConfig(**UpperCamelCase_)
__snake_case = InstructBlipQFormerConfig(**UpperCamelCase_)
__snake_case = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case = CONFIG_MAPPING[text_model_type](**UpperCamelCase_)
__snake_case = self.text_config.tie_word_embeddings
__snake_case = self.text_config.is_encoder_decoder
__snake_case = num_query_tokens
__snake_case = self.vision_config.hidden_size
__snake_case = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case = 1.0
__snake_case = 0.02
    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
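A hedged construction sketch using the composite classmethod above; the OPT text config mirrors the default noted in `__init__`, and the names below assume the standard transformers entry points.

# from transformers import OPTConfig
# vision = InstructBlipVisionConfig()
# qformer = InstructBlipQFormerConfig()
# config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, OPTConfig())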
| 704 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
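    # Illustrative cross-check (not in the original file): the three
    # implementations should agree for small targets.
    for t in range(1, 8):
        assert (
            combination_sum_iv(n, array, t)
            == combination_sum_iv_dp_array(n, array, t)
            == combination_sum_iv_bottom_up(n, array, t)
        )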
| 676 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 705 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
            mocked_bulk.return_value = [(True, None)] * 3_0  # assign, rather than call, the mock's return value
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def A ( snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__snake_case = 'index.faiss'
__snake_case = f"mock://{index_name}"
index.save(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=lowercase_)
            mocked_bulk.return_value = [(True, None)] * 3  # assign, rather than call, the mock's return value
index.add_documents(['foo', 'bar', 'foobar'])
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
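# Non-mocked usage sketch (added for context; assumes a live Elasticsearch node
# at localhost:9200, which the tests above deliberately avoid by patching the
# client and the bulk helper):
#     es = Elasticsearch([{"host": "localhost", "port": "9200"}])
#     index = ElasticSearchIndex(es_client=es)
#     index.add_documents(["foo", "bar", "foobar"])
#     scores, ids = index.search("foo")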
| 676 | 0 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __lowercase :
__UpperCAmelCase = 42
__UpperCAmelCase = None
__UpperCAmelCase = None
UpperCAmelCase__ : Any = namedtuple("CoinsDistribResult", "moves excess")
def A ( snake_case__ : List[str] ) -> int:
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(snake_case__ : Optional[Any] ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(snake_case__ : List[Any] ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowerCamelCase_ ) != count_coins(lowerCamelCase_ ):
        raise ValueError('The number of nodes should be the same as the number of coins' )
# Main calculation
def get_distrib(snake_case__ : Any ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
__snake_case = get_distrib(node.left )
__snake_case = get_distrib(node.right )
__snake_case = 1 - left_distrib_excess
__snake_case = 1 - right_distrib_excess
__snake_case = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowerCamelCase_ )
+ abs(lowerCamelCase_ )
)
__snake_case = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowerCamelCase_ , lowerCamelCase_ )
return get_distrib(lowerCamelCase_ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
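# Worked example (added for clarity): with a root holding 3 coins and two
# leaves holding 0, each leaf reports excess 0, so the root must send
# coins_to_left = coins_to_right = 1 coin down each edge:
# moves = abs(1) + abs(1) = 2, the classic "distribute coins in a binary
# tree" answer.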
| 706 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = args.log_outputs
__snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
__snake_case = load_metric('wer' )
__snake_case = load_metric('cer' )
# compute metrics
__snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] )
__snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
__snake_case = f"WER: {wer_result}\nCER: {cer_result}"
print(snake_case__ )
with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
f.write(snake_case__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__snake_case = f"log_{dataset_id}_predictions.txt"
__snake_case = f"log_{dataset_id}_targets.txt"
with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t:
# mapping function to write output
def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
p.write(f"{i}" + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f"{i}" + '\n' )
t.write(batch['target'] + '\n' )
result.map(snake_case__ , with_indices=snake_case__ )
def A ( snake_case__ : str ) -> str:
'''simple docstring'''
__snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__snake_case = re.sub(snake_case__ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
    __snake_case = ['\n\n', '\n', '   ', '  ']
for t in token_sequences_to_ignore:
__snake_case = ' '.join(text.split(snake_case__ ) )
return text
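# Illustrative behavior of the normalizer above (added; example values only):
#     normalize_text("Hello, World!")  ->  "hello world"
# punctuation matched by chars_to_ignore_regex is stripped after lowercasing,
# then newlines and runs of spaces are collapsed to single spaces.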
def A ( snake_case__ : int ) -> Optional[int]:
'''simple docstring'''
# load dataset
__snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ )
    # for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
__snake_case = AutoFeatureExtractor.from_pretrained(args.model_id )
__snake_case = feature_extractor.sampling_rate
# resample audio
__snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) )
# load eval pipeline
if args.device is None:
__snake_case = 0 if torch.cuda.is_available() else -1
__snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case__ : Optional[Any] ):
__snake_case = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__snake_case = prediction['text']
__snake_case = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
__snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case__ , snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
| 676 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCAmelCase : int = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
lowerCAmelCase : str = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
lowerCAmelCase : Tuple = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
def _a ( self) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="auto" , lowercase_=-1 , lowercase_=0.9 , lowercase_=5 , lowercase_=5_0_0 , lowercase_="gpt2-large" , lowercase_=-1 , lowercase_=1_0_2_4 , lowercase_=2_5 , lowercase_=5 , lowercase_=True , lowercase_=2_5 , ) -> str:
__snake_case = compute_mauve(
p_text=_lowercase , q_text=_lowercase , p_features=_lowercase , q_features=_lowercase , p_tokens=_lowercase , q_tokens=_lowercase , num_buckets=_lowercase , pca_max_data=_lowercase , kmeans_explained_var=_lowercase , kmeans_num_redo=_lowercase , kmeans_max_iter=_lowercase , featurize_model_name=_lowercase , device_id=_lowercase , max_text_length=_lowercase , divergence_curve_discretization_size=_lowercase , mauve_scaling_factor=_lowercase , verbose=_lowercase , seed=_lowercase , )
return out
| 707 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the `barrier` calls hang, you have some network issues; you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def A ( *snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(snake_case__ , 'r' ) as fh:
fcntl.flock(snake_case__ , fcntl.LOCK_EX )
try:
print(*snake_case__ )
finally:
fcntl.flock(snake_case__ , fcntl.LOCK_UN )
UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
UpperCAmelCase__ : Any = torch.device("cuda", local_rank)
UpperCAmelCase__ : Union[str, Any] = socket.gethostname()
UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
UpperCAmelCase__ : Optional[int] = dist.get_rank()
UpperCAmelCase__ : List[str] = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 676 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
def A ( snake_case__ : List[str]=None , snake_case__ : Any=None ) -> List[Any]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=__A )
@dataclass
class __lowercase :
__UpperCAmelCase = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
__UpperCAmelCase = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
__UpperCAmelCase = list_field(
default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
__UpperCAmelCase = field(
default=__A , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
__UpperCAmelCase = field(
default=__A , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
__UpperCAmelCase = field(
default=__A , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
__UpperCAmelCase = field(default=__A , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
__UpperCAmelCase = field(default=__A , metadata={'''help''': '''Benchmark training of model'''} )
__UpperCAmelCase = field(default=__A , metadata={'''help''': '''Verbose memory tracing'''} )
__UpperCAmelCase = field(
default=__A , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
__UpperCAmelCase = field(
default=__A , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
__UpperCAmelCase = field(default=__A , metadata={'''help''': '''Trace memory line by line'''} )
__UpperCAmelCase = field(default=__A , metadata={'''help''': '''Save result to a CSV file'''} )
__UpperCAmelCase = field(default=__A , metadata={'''help''': '''Save all print statements in a log file'''} )
__UpperCAmelCase = field(default=__A , metadata={'''help''': '''Whether to print environment information'''} )
__UpperCAmelCase = field(
default=__A , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
__UpperCAmelCase = field(
default=f"inference_time_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
__UpperCAmelCase = field(
default=f"inference_memory_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
__UpperCAmelCase = field(
default=f"train_time_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
__UpperCAmelCase = field(
default=f"train_memory_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
__UpperCAmelCase = field(
default=f"env_info_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
__UpperCAmelCase = field(
default=f"log_{round(time() )}.csv" , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
__UpperCAmelCase = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
__UpperCAmelCase = field(
default=__A , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def _a ( self) -> List[str]:
warnings.warn(
F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
' are deprecated in general and it is advised to use external Benchmarking libraries '
' to benchmark Transformer models.' , lowercase_ , )
def _a ( self) -> Optional[Any]:
return json.dumps(dataclasses.asdict(self) , indent=2)
@property
def _a ( self) -> List[str]:
if len(self.models) <= 0:
raise ValueError(
'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
' bert-base-cased` or `args.models = [\'bert-base-cased\'].')
return self.models
@property
def _a ( self) -> Optional[Any]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.')
return False
else:
return True
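# Minimal usage sketch (added; uses the public `PyTorchBenchmarkArguments` /
# `PyTorchBenchmark` names from transformers rather than the obfuscated class
# above, so treat the exact names as an assumption):
#     args = PyTorchBenchmarkArguments(
#         models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[32]
#     )
#     results = PyTorchBenchmark(args).run()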
| 708 |
from datetime import datetime
import requests
def A ( snake_case__ : str ) -> bytes:
'''simple docstring'''
__snake_case = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
__snake_case = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(snake_case__ ).content
if __name__ == "__main__":
UpperCAmelCase__ : Dict = input("Enter Video/IGTV url: ").strip()
UpperCAmelCase__ : Optional[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 0 |
'''simple docstring'''
def A ( snake_case__ : list[list[float]] ) -> int:
'''simple docstring'''
__snake_case = []
for data in source_data:
for i, el in enumerate(__lowerCAmelCase ):
if len(__lowerCAmelCase ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(__lowerCAmelCase ) )
return data_lists
def A ( snake_case__ : list[list[float]] , snake_case__ : list[int] ) -> Tuple:
'''simple docstring'''
__snake_case = []
for dlist, weight in zip(__lowerCAmelCase , __lowerCAmelCase ):
__snake_case = min(__lowerCAmelCase )
__snake_case = max(__lowerCAmelCase )
__snake_case = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
__snake_case = f"Invalid weight of {weight:f} provided"
raise ValueError(__lowerCAmelCase )
score_lists.append(__lowerCAmelCase )
return score_lists
def A ( snake_case__ : list[list[float]] ) -> List[str]:
'''simple docstring'''
__snake_case = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(__lowerCAmelCase ):
__snake_case = final_scores[j] + ele
return final_scores
def A ( snake_case__ : list[list[float]] , snake_case__ : list[int] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = get_data(__lowerCAmelCase )
__snake_case = calculate_each_score(__lowerCAmelCase , __lowerCAmelCase )
__snake_case = generate_final_scores(__lowerCAmelCase )
# append scores to source data
for i, ele in enumerate(__lowerCAmelCase ):
source_data[i].append(__lowerCAmelCase )
return source_data
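# Worked example (added; illustrative values): with
#     source_data = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#     weights     = [0, 0, 1]
# weight-0 columns score 1 - (x - min) / (max - min) (lower is better) and the
# weight-1 column scores (x - min) / (max - min) (higher is better), giving
# combined scores of 2.0, 1.0 and about 1.33 appended to the three rows.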
| 709 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowercase :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def _a ( self) -> Union[str, Any]:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = OpenLlamaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append the new tokens to input_ids and extend the attention mask accordingly
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
def _a ( self) -> Optional[Any]:
__snake_case = self.prepare_config_and_inputs()
        (
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
        ) = config_and_inputs
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> Tuple:
__snake_case = OpenLlamaModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self) -> int:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'single_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'multi_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _a ( self) -> List[Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def _a ( self , lowercase_) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = ids_tensor([1, 1_0] , config.vocab_size)
__snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = OpenLlamaModel(lowercase_)
original_model.to(lowercase_)
original_model.eval()
__snake_case = original_model(lowercase_).last_hidden_state
__snake_case = original_model(lowercase_).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = {'type': scaling_type, 'factor': 10.0}
__snake_case = OpenLlamaModel(lowercase_)
scaled_model.to(lowercase_)
scaled_model.eval()
__snake_case = scaled_model(lowercase_).last_hidden_state
__snake_case = scaled_model(lowercase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
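# Context note (added): the `{"type": scaling_type, "factor": 10.0}` dict above
# mirrors the `rope_scaling` config knob: "linear" rescales positions
# immediately, while "dynamic" (NTK-aware) only kicks in past the original
# max_position_embeddings, which is why the short-input outputs still match in
# the dynamic branch of the test.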
| 676 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def A ( snake_case__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = StableDiffusionLatentUpscalePipeline
__UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
__UpperCAmelCase = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
__UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase = frozenset([] )
__UpperCAmelCase = True
@property
def _a ( self) -> Tuple:
__snake_case = 1
__snake_case = 4
__snake_case = (1_6, 1_6)
__snake_case = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(UpperCamelCase_)
return image
def _a ( self) -> Tuple:
torch.manual_seed(0)
__snake_case = UNetaDConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[3_2, 3_2, 6_4, 6_4] , time_cond_proj_dim=1_6_0 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=3_2 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
__snake_case = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4, 6_4] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
__snake_case = EulerDiscreteScheduler(prediction_type='sample')
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='quick_gelu' , projection_dim=5_1_2 , )
__snake_case = CLIPTextModel(UpperCamelCase_)
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
__snake_case = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _a ( self , lowercase_ , lowercase_=0) -> List[Any]:
if str(UpperCamelCase_).startswith('mps'):
__snake_case = torch.manual_seed(UpperCamelCase_)
else:
__snake_case = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_)
__snake_case = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _a ( self) -> List[str]:
__snake_case = "cpu"
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**UpperCamelCase_)
pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__snake_case = self.get_dummy_inputs(UpperCamelCase_)
__snake_case = pipe(**UpperCamelCase_).images
__snake_case = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_5_6, 2_5_6, 3))
__snake_case = np.array(
[0.4722_2412, 0.4192_1633, 0.4471_7434, 0.4687_4192, 0.4258_8258, 0.4615_0726, 0.467_7534, 0.4558_3832, 0.4857_9055])
__snake_case = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(UpperCamelCase_ , 1e-3)
def _a ( self) -> Any:
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)
def _a ( self) -> List[str]:
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)
def _a ( self) -> Tuple:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
def _a ( self) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=7e-3)
def _a ( self) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)
def _a ( self) -> Tuple:
super().test_save_load_local(expected_max_difference=3e-3)
def _a ( self) -> Optional[Any]:
super().test_save_load_optional_components(expected_max_difference=3e-3)
def _a ( self) -> Tuple:
__snake_case = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**UpperCamelCase_)
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_)
pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__snake_case = self.get_dummy_inputs(UpperCamelCase_)
__snake_case = 2
__snake_case = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # skip schedulers that are not supported by this latent upscaler pipeline
continue
__snake_case = getattr(UpperCamelCase_ , scheduler_enum.name)
__snake_case = scheduler_cls.from_config(pipe.scheduler.config)
__snake_case = pipe(**UpperCamelCase_)[0]
outputs.append(UpperCamelCase_)
assert check_same_shape(UpperCamelCase_)
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self) -> Any:
__snake_case = torch.manual_seed(3_3)
__snake_case = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa)
pipe.to('cuda')
__snake_case = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa)
upscaler.to('cuda')
__snake_case = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
__snake_case = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type='latent').images
__snake_case = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=2_0 , guidance_scale=0 , generator=UpperCamelCase_ , output_type='np' , ).images[0]
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy')
assert np.abs((expected_image - image).mean()) < 5e-2
def _a ( self) -> Any:
__snake_case = torch.manual_seed(3_3)
__snake_case = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa)
upscaler.to('cuda')
__snake_case = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
__snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png')
__snake_case = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=2_0 , guidance_scale=0 , generator=UpperCamelCase_ , output_type='np' , ).images[0]
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy')
assert np.abs((expected_image - image).max()) < 5e-2
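# Pipeline note (added): the first slow test above chains two pipelines; the
# base model emits `output_type="latent"` tensors and the x2 latent upscaler
# consumes them directly, so no decode/re-encode round trip through pixel
# space happens before the final upscaled image is produced.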
| 710 |
def A ( snake_case__ : int ) -> bool:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
__snake_case = f"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if number < 0:
return False
__snake_case = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
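# Worked examples (added): the check above accepts automorphic numbers, i.e.
# numbers whose square ends in the number itself:
#     5 * 5 = 25, 6 * 6 = 36, 76 * 76 = 5776, 376 * 376 = 141376  ->  True
#     7 * 7 = 49  ->  False (last digits 7 and 9 differ)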
| 676 | 0 |
def A ( snake_case__ : int ) -> Optional[int]:
'''simple docstring'''
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError('multiplicative_persistence() only accepts integral values' )
if num < 0:
raise ValueError('multiplicative_persistence() does not accept negative values' )
__snake_case = 0
__snake_case = str(_lowerCAmelCase )
while len(_lowerCAmelCase ) != 1:
__snake_case = [int(_lowerCAmelCase ) for i in num_string]
__snake_case = 1
for i in range(0 , len(_lowerCAmelCase ) ):
total *= numbers[i]
__snake_case = str(_lowerCAmelCase )
steps += 1
return steps
def A ( snake_case__ : int ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError('additive_persistence() only accepts integral values' )
if num < 0:
raise ValueError('additive_persistence() does not accept negative values' )
__snake_case = 0
__snake_case = str(_lowerCAmelCase )
while len(_lowerCAmelCase ) != 1:
__snake_case = [int(_lowerCAmelCase ) for i in num_string]
__snake_case = 0
for i in range(0 , len(_lowerCAmelCase ) ):
total += numbers[i]
__snake_case = str(_lowerCAmelCase )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
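# Worked examples (added): multiplicative persistence of 39 is 3 steps
# (3 * 9 = 27 -> 2 * 7 = 14 -> 1 * 4 = 4), and additive persistence of 199 is
# also 3 steps (1 + 9 + 9 = 19 -> 1 + 9 = 10 -> 1 + 0 = 1).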
| 711 |
import numpy as np
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
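# Quick numeric check (added): silu(x) = x * sigmoid(x), so silu(0) = 0 and
# silu(1) = 1 / (1 + e**-1), roughly 0.7311. SiLU (a.k.a. swish) is smooth and
# slightly negative for small negative inputs before flattening toward 0.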
| 676 | 0 |
import unittest
import numpy as np
def A ( snake_case__ : Any , snake_case__ : Any , snake_case__ : str , snake_case__ : List[str] = None , ) -> np.ndarray:
'''simple docstring'''
__snake_case = np.shape(__lowerCAmelCase )
__snake_case = np.shape(__lowerCAmelCase )
__snake_case = np.shape(__lowerCAmelCase )
if shape_a[0] != shape_b[0]:
__snake_case = (
"""Expected the same number of rows for A and B. """
f"Instead found A of size {shape_a} and B of size {shape_b}"
)
raise ValueError(__lowerCAmelCase )
if shape_b[1] != shape_c[1]:
__snake_case = (
"""Expected the same number of columns for B and C. """
f"Instead found B of size {shape_b} and C of size {shape_c}"
)
raise ValueError(__lowerCAmelCase )
__snake_case = pseudo_inv
if a_inv is None:
try:
__snake_case = np.linalg.inv(__lowerCAmelCase )
except np.linalg.LinAlgError:
raise ValueError(
'Input matrix A is not invertible. Cannot compute Schur complement.' )
return mat_c - mat_b.T @ a_inv @ mat_b
class __lowercase ( unittest.TestCase ):
def _a ( self) -> None:
__snake_case = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
__snake_case = np.array([[0, 3], [3, 0], [2, 3]])
__snake_case = np.array([[2, 1], [6, 3]])
__snake_case = schur_complement(_a , _a , _a)
__snake_case = np.block([[a, b], [b.T, c]])
__snake_case = np.linalg.det(_a)
__snake_case = np.linalg.det(_a)
__snake_case = np.linalg.det(_a)
self.assertAlmostEqual(_a , det_a * det_s)
def _a ( self) -> None:
__snake_case = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
__snake_case = np.array([[0, 3], [3, 0], [2, 3]])
__snake_case = np.array([[2, 1], [6, 3]])
with self.assertRaises(_a):
schur_complement(_a , _a , _a)
def _a ( self) -> None:
__snake_case = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
__snake_case = np.array([[0, 3], [3, 0], [2, 3]])
__snake_case = np.array([[2, 1, 3], [6, 3, 5]])
with self.assertRaises(_a):
schur_complement(_a , _a , _a)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
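# Background note (added): for the block matrix M = [[A, B], [B.T, C]] with A
# invertible, the Schur complement is S = C - B.T @ inv(A) @ B, and
# det(M) = det(A) * det(S), which is exactly the identity the first unit test
# above verifies.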
| 712 |
def A ( snake_case__ : int ) -> bool:
'''simple docstring'''
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
__snake_case = 4
__snake_case = (1 << p) - 1
for _ in range(p - 2 ):
__snake_case = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
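# Worked example (added): for p = 5, m = 2**5 - 1 = 31 and the p - 2 = 3
# iterations give s: 4 -> (16 - 2) % 31 = 14 -> (196 - 2) % 31 = 8
# -> (64 - 2) % 31 = 0, so the Mersenne number 31 is reported prime.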
| 676 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCAmelCase__ : int = "bart"
UpperCAmelCase__ : int = True
@st.cache(allow_output_mutation=__UpperCAmelCase )
def A ( ) -> List[str]:
'''simple docstring'''
if LOAD_DENSE_INDEX:
__snake_case = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
__snake_case = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
__snake_case = qar_model.eval()
else:
__snake_case , __snake_case = (None, None)
if MODEL_TYPE == "bart":
__snake_case = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
__snake_case = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
__snake_case = sas_model.eval()
else:
__snake_case , __snake_case = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=__UpperCAmelCase )
def A ( ) -> List[Any]:
'''simple docstring'''
if LOAD_DENSE_INDEX:
__snake_case = faiss.StandardGpuResources()
__snake_case = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
__snake_case = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case = faiss.IndexFlatIP(128 )
__snake_case = faiss.index_cpu_to_gpu(__UpperCAmelCase , 1 , __UpperCAmelCase )
wikiaab_gpu_index_flat.add(__UpperCAmelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case = (None, None)
__snake_case = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=__UpperCAmelCase )
def A ( ) -> int:
'''simple docstring'''
__snake_case = datasets.load_dataset('eli5' , name='LFQA_reddit' )
__snake_case = elia['train_eli5']
__snake_case = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
__snake_case = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(__UpperCAmelCase )
return (elia_train, eli5_train_q_index)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = load_indexes()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = load_models()
UpperCAmelCase__ , UpperCAmelCase__ : Dict = load_train_data()
def A ( snake_case__ : List[Any] , snake_case__ : List[Any]=10 ) -> Optional[Any]:
'''simple docstring'''
__snake_case = embed_questions_for_retrieval([question] , __UpperCAmelCase , __UpperCAmelCase )
__snake_case , __snake_case = eli5_train_q_index.search(__UpperCAmelCase , __UpperCAmelCase )
__snake_case = [elia_train[int(__UpperCAmelCase )] for i in I[0]]
return nn_examples
def A ( snake_case__ : str , snake_case__ : List[str]="wiki40b" , snake_case__ : str="dense" , snake_case__ : Union[str, Any]=10 ) -> Optional[int]:
'''simple docstring'''
if source == "none":
__snake_case , __snake_case = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case = query_qa_dense_index(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
__snake_case , __snake_case = query_es_index(
__UpperCAmelCase , __UpperCAmelCase , index_name='english_wiki40b_snippets_100w' , n_results=__UpperCAmelCase , )
__snake_case = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
__snake_case = 'question: {} context: {}'.format(__UpperCAmelCase , __UpperCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case__ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case__ : None),
} )
def A ( snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[Any]=64 , snake_case__ : Optional[int]=256 , snake_case__ : List[Any]=False , snake_case__ : int=2 , snake_case__ : Union[str, Any]=0.95 , snake_case__ : Optional[int]=0.8 ) -> Any:
'''simple docstring'''
with torch.no_grad():
__snake_case = qa_sas_generate(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_answers=1 , num_beams=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase , do_sample=__UpperCAmelCase , temp=__UpperCAmelCase , top_p=__UpperCAmelCase , top_k=__UpperCAmelCase , max_input_length=1024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
UpperCAmelCase__ : List[Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
UpperCAmelCase__ : Optional[int] = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ : str = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase__ : Any = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
UpperCAmelCase__ : List[Any] = st.sidebar.checkbox("Demo options")
if demo_options:
UpperCAmelCase__ : List[Any] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
UpperCAmelCase__ : Tuple = action_list.index(action_st)
UpperCAmelCase__ : Optional[int] = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
UpperCAmelCase__ : Optional[int] = show_type == "Show full text of passages"
else:
UpperCAmelCase__ : Optional[Any] = 3
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Union[str, Any] = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
UpperCAmelCase__ : int = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ : List[str] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
UpperCAmelCase__ : Tuple = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
UpperCAmelCase__ : Tuple = "wiki40b"
UpperCAmelCase__ : Any = "dense"
UpperCAmelCase__ : Optional[int] = "beam"
UpperCAmelCase__ : Any = 2
UpperCAmelCase__ : Optional[Any] = 64
UpperCAmelCase__ : int = 2_56
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Tuple = st.sidebar.checkbox("Generation options")
if generate_options:
UpperCAmelCase__ : List[Any] = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
UpperCAmelCase__ : Tuple = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
UpperCAmelCase__ : str = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ : Union[str, Any] = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ : Tuple = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ : int = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ : Union[str, Any] = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ : str = None
# start main text
UpperCAmelCase__ : int = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
UpperCAmelCase__ : Optional[Any] = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase__ : Union[str, Any] = st.text_input("Enter your question here:", "")
else:
UpperCAmelCase__ : List[str] = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ , UpperCAmelCase__ : int = make_support(question, source=wiki_source, method="dense", n_results=10)
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = make_support(question, source=wiki_source, method="sparse", n_results=10)
UpperCAmelCase__ : Any = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ : Union[str, Any] = support_list[:10]
UpperCAmelCase__ : int = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
UpperCAmelCase__ , UpperCAmelCase__ : Any = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
UpperCAmelCase__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
UpperCAmelCase__ : Any = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ : Optional[int] = "[{}]({})".format(res[0], wiki_url)
else:
UpperCAmelCase__ : Optional[int] = sec_titles.split(" & ")
UpperCAmelCase__ : Tuple = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase__ : str = find_nearest_training(question)
UpperCAmelCase__ : Tuple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
UpperCAmelCase__ : Optional[Any] = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
UpperCAmelCase__ : Tuple = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 713 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : Optional[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
from __future__ import annotations
import math
def A ( snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple ) -> Optional[int]:
'''simple docstring'''
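    # Minimax over a perfect binary tree of leaf scores: the maximizing and
    # minimizing players alternate as the depth increases.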
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(lowerCAmelCase__ ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , )
return min(
minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , )
def A ( ) -> int:
'''simple docstring'''
__snake_case = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    __snake_case = int(math.log(len(lowerCAmelCase__ ) , 2 ) )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def A ( snake_case__ : int , snake_case__ : Any , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : Optional[int]=True , snake_case__ : Optional[Any]="pt" ) -> str:
'''simple docstring'''
__snake_case = {"""add_prefix_space""": True} if isinstance(__A , __A ) and not line.startswith(' ' ) else {}
__snake_case = padding_side
return tokenizer(
[line] , max_length=__A , padding='max_length' if pad_to_max_length else None , truncation=__A , return_tensors=__A , add_special_tokens=__A , **__A , )
def A ( snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : str=None , ) -> Tuple:
'''simple docstring'''
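    # Drop token columns that are padding in every example of the batch.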
__snake_case = input_ids.ne(__A ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowercase ( __lowerCAmelCase ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_="train" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="" , ) -> str:
super().__init__()
__snake_case = Path(_UpperCamelCase).joinpath(type_path + '.source')
__snake_case = Path(_UpperCamelCase).joinpath(type_path + '.target')
__snake_case = self.get_char_lens(self.src_file)
__snake_case = max_source_length
__snake_case = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
__snake_case = tokenizer
__snake_case = prefix
if n_obs is not None:
__snake_case = self.src_lens[:n_obs]
__snake_case = src_lang
__snake_case = tgt_lang
def __len__( self) -> Union[str, Any]:
return len(self.src_lens)
def __getitem__( self , lowercase_) -> Dict[str, torch.Tensor]:
__snake_case = index + 1 # linecache starts at 1
__snake_case = self.prefix + linecache.getline(str(self.src_file) , _UpperCamelCase).rstrip('\n')
__snake_case = linecache.getline(str(self.tgt_file) , _UpperCamelCase).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _UpperCamelCase):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
__snake_case = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _UpperCamelCase) else self.tokenizer
)
__snake_case = self.tokenizer.generator if isinstance(self.tokenizer , _UpperCamelCase) else self.tokenizer
__snake_case = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_source_length , 'right')
__snake_case = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_target_length , 'right')
__snake_case = source_inputs["""input_ids"""].squeeze()
__snake_case = target_inputs["""input_ids"""].squeeze()
__snake_case = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( lowercase_) -> Optional[Any]:
return [len(_UpperCamelCase) for x in Path(_UpperCamelCase).open().readlines()]
def _a ( self , lowercase_) -> Dict[str, torch.Tensor]:
__snake_case = torch.stack([x['input_ids'] for x in batch])
__snake_case = torch.stack([x['attention_mask'] for x in batch])
__snake_case = torch.stack([x['decoder_input_ids'] for x in batch])
__snake_case = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _UpperCamelCase)
else self.tokenizer.pad_token_id
)
__snake_case = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _UpperCamelCase)
else self.tokenizer.pad_token_id
)
__snake_case = trim_batch(_UpperCamelCase , _UpperCamelCase)
__snake_case = trim_batch(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase)
__snake_case = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
UpperCAmelCase__ : Dict = getLogger(__name__)
def A ( snake_case__ : Tuple ) -> str:
'''simple docstring'''
return list(itertools.chain.from_iterable(__A ) )
def A ( snake_case__ : int ) -> Dict:
'''simple docstring'''
__snake_case = get_git_info()
save_json(__A , os.path.join(__A , 'git_log.json' ) )
def A ( snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : str=4 , **snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
with open(__A , 'w' ) as f:
json.dump(__A , __A , indent=__A , **__A )
def A ( snake_case__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
with open(__A ) as f:
return json.load(__A )
def A ( ) -> Any:
'''simple docstring'''
__snake_case = git.Repo(search_parent_directories=__A )
__snake_case = {
"""repo_id""": str(__A ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def A ( snake_case__ : Optional[Any] , snake_case__ : str ) -> Any:
'''simple docstring'''
return list(map(__A , __A ) )
def A ( snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
with open(__A , 'wb' ) as f:
return pickle.dump(__A , __A )
def A ( snake_case__ : Union[str, Any] ) -> Any:
'''simple docstring'''
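    # SQuAD-style normalization: lowercase, strip punctuation, articles, and extra whitespace.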
def remove_articles(snake_case__ : str ):
return re.sub(r'\b(a|an|the)\b' , ' ' , __A )
def white_space_fix(snake_case__ : Optional[int] ):
return " ".join(text.split() )
def remove_punc(snake_case__ : List[str] ):
__snake_case = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(snake_case__ : int ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) )
def A ( snake_case__ : int , snake_case__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
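    # Token-overlap F1 between a prediction and a gold answer (the SQuAD metric).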
__snake_case = normalize_answer(__A ).split()
__snake_case = normalize_answer(__A ).split()
__snake_case = Counter(__A ) & Counter(__A )
__snake_case = sum(common.values() )
if num_same == 0:
return 0
__snake_case = 1.0 * num_same / len(__A )
__snake_case = 1.0 * num_same / len(__A )
__snake_case = (2 * precision * recall) / (precision + recall)
return fa
def A ( snake_case__ : List[str] , snake_case__ : List[str] ) -> str:
'''simple docstring'''
return normalize_answer(__A ) == normalize_answer(__A )
def A ( snake_case__ : Dict , snake_case__ : int ) -> Tuple:
'''simple docstring'''
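    # Average exact-match score over aligned lists of hypotheses and references.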
assert len(__A ) == len(__A )
__snake_case = 0
for hypo, pred in zip(__A , __A ):
em += exact_match_score(__A , __A )
if len(__A ) > 0:
em /= len(__A )
return {"em": em}
def A ( snake_case__ : Any ) -> Tuple:
'''simple docstring'''
return model_prefix.startswith('rag' )
def A ( snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : str ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
__snake_case = """dropout_rate"""
for p in extra_params:
if getattr(__A , __A , __A ):
if not hasattr(__A , __A ) and not hasattr(__A , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(__A ) )
delattr(__A , __A )
continue
__snake_case = p if hasattr(__A , __A ) else equivalent_param[p]
setattr(__A , __A , getattr(__A , __A ) )
delattr(__A , __A )
return hparams, config
| 715 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def A ( snake_case__ : List[Any] ) -> Any:
'''simple docstring'''
__snake_case = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__snake_case = 4
__snake_case = 48
__snake_case = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = [6, 6, 6, 6]
__snake_case = 60
__snake_case = [6, 6, 6, 6]
__snake_case = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = 4
__snake_case = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__snake_case = 1
__snake_case = 1
__snake_case = 126
__snake_case = 7
__snake_case = 255.0
__snake_case = ''
return config
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
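    # Map a parameter name from the original Swin2SR checkpoint to the HF Transformers layout.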
if "patch_embed.proj" in name and "layers" not in name:
__snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__snake_case = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__snake_case = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "conv_first" in name:
__snake_case = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__snake_case = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__snake_case = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__snake_case = name.replace('upsample.2' , 'upsample.convolution_1' )
__snake_case = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__snake_case = 'swin2sr.' + name
return name
def A ( snake_case__ : str , snake_case__ : List[Any] ) -> Dict:
'''simple docstring'''
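    # Split each fused qkv projection into separate query/key/value tensors; rename everything else.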
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[4] )
__snake_case = config.embed_dim
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
else:
__snake_case = val
return orig_state_dict
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int ) -> Tuple:
'''simple docstring'''
__snake_case = get_config(snake_case__ )
__snake_case = SwinaSRForImageSuperResolution(snake_case__ )
model.eval()
__snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )
__snake_case = convert_state_dict(snake_case__ , snake_case__ )
__snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
__snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
__snake_case = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__snake_case = 126 if 'Jpeg' in checkpoint_url else 256
__snake_case = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case = transforms(snake_case__ ).unsqueeze(0 )
if config.num_channels == 1:
__snake_case = pixel_values[:, 0, :, :].unsqueeze(1 )
__snake_case = model(snake_case__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 )
print('Looks ok!' )
__snake_case = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__snake_case = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 676 | 0 |
def A ( snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Dict ) -> int:
'''simple docstring'''
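    # The color is valid only if no adjacent vertex (marked 1 in the adjacency row) already uses it.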
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(_lowerCamelCase ) )
def A ( snake_case__ : str , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : List[Any] ) -> Tuple:
'''simple docstring'''
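    # Base case: every vertex has been assigned a color.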
if index == len(_lowerCamelCase ):
return True
# Recursive Step
for i in range(_lowerCamelCase ):
if valid_coloring(graph[index] , _lowerCamelCase , _lowerCamelCase ):
# Color current vertex
__snake_case = i
# Validate coloring
if util_color(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , index + 1 ):
return True
# Backtrack
__snake_case = -1
return False
def A ( snake_case__ : int , snake_case__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = [-1] * len(_lowerCamelCase )
if util_color(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , 0 ):
return colored_vertices
return []
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
import requests
UpperCAmelCase__ : List[Any] = "" # <-- Put your OpenWeatherMap appid here!
UpperCAmelCase__ : Optional[Any] = "https://api.openweathermap.org/data/2.5/"
def A ( snake_case__ : str = "Chicago" , snake_case__ : List[str] = APPID ) -> str:
'''simple docstring'''
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def A ( snake_case__ : Any = "Kolkata, India" , snake_case__ : Any = APPID ) -> Union[str, Any]:
'''simple docstring'''
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def weather_onecall( lat: float = 55.68 , lon: float = 12.57 , appid: str = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
UpperCAmelCase__ : List[Any] = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 717 |
from __future__ import annotations
class __lowercase :
def __init__( self , lowercase_) -> None:
__snake_case = data
__snake_case = None
__snake_case = None
def A ( snake_case__ : Node | None ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def A ( snake_case__ : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def A ( snake_case__ : Node ) -> bool:
'''simple docstring'''
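    # A binary tree is full when every node has either zero or two children.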
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def A ( ) -> None: # Main function for testing.
'''simple docstring'''
__snake_case = Node(1 )
__snake_case = Node(2 )
__snake_case = Node(3 )
__snake_case = Node(4 )
__snake_case = Node(5 )
__snake_case = Node(6 )
__snake_case = Node(7 )
__snake_case = Node(8 )
__snake_case = Node(9 )
print(is_full_binary_tree(snake_case__ ) )
print(depth_of_tree(snake_case__ ) )
print('Tree is: ' )
display(snake_case__ )
if __name__ == "__main__":
main()
| 676 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def A ( snake_case__ : Tuple , snake_case__ : str=0.999 , snake_case__ : List[Any]="cosine" , ) -> Optional[Any]:
'''simple docstring'''
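    # Discretize a continuous alpha_bar(t) schedule into per-step betas, clipping each
    # beta to keep the diffusion process numerically stable.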
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
__snake_case = []
for i in range(a_ ):
__snake_case = i / num_diffusion_timesteps
__snake_case = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(a_ ) / alpha_bar_fn(a_ ) , a_ ) )
return torch.tensor(a_ , dtype=torch.floataa )
class __lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
__UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase = 2
@register_to_config
def __init__( self , lowercase_ = 1_0_0_0 , lowercase_ = 0.0_0085 , lowercase_ = 0.012 , lowercase_ = "linear" , lowercase_ = None , lowercase_ = "epsilon" , lowercase_ = False , lowercase_ = False , lowercase_ = 1.0 , lowercase_ = "linspace" , lowercase_ = 0 , ) -> Tuple:
if trained_betas is not None:
__snake_case = torch.tensor(lowerCamelCase_ , dtype=torch.floataa)
elif beta_schedule == "linear":
__snake_case = torch.linspace(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__snake_case = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase_ , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__snake_case = betas_for_alpha_bar(lowerCamelCase_ , alpha_transform_type='cosine')
elif beta_schedule == "exp":
__snake_case = betas_for_alpha_bar(lowerCamelCase_ , alpha_transform_type='exp')
else:
raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}")
__snake_case = 1.0 - self.betas
__snake_case = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
__snake_case = use_karras_sigmas
def _a ( self , lowercase_ , lowercase_=None) -> Dict:
if schedule_timesteps is None:
__snake_case = self.timesteps
__snake_case = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
__snake_case = 1 if len(lowerCamelCase_) > 1 else 0
else:
__snake_case = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_) else timestep
__snake_case = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _a ( self) -> str:
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _a ( self , lowercase_ , lowercase_ , ) -> torch.FloatTensor:
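        # Precondition the input: divide by sqrt(sigma^2 + 1) so the model sees
        # (roughly) unit-variance samples at every noise level.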
__snake_case = self.index_for_timestep(lowerCamelCase_)
__snake_case = self.sigmas[step_index]
__snake_case = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , ) -> str:
__snake_case = num_inference_steps
__snake_case = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__snake_case = np.linspace(0 , num_train_timesteps - 1 , lowerCamelCase_ , dtype=lowerCamelCase_)[::-1].copy()
elif self.config.timestep_spacing == "leading":
__snake_case = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(0 , lowerCamelCase_) * step_ratio).round()[::-1].copy().astype(lowerCamelCase_)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__snake_case = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(lowerCamelCase_ , 0 , -step_ratio)).round().copy().astype(lowerCamelCase_)
timesteps -= 1
else:
raise ValueError(
F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")
__snake_case = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
__snake_case = np.log(lowerCamelCase_)
__snake_case = np.interp(lowerCamelCase_ , np.arange(0 , len(lowerCamelCase_)) , lowerCamelCase_)
if self.config.use_karras_sigmas:
__snake_case = self._convert_to_karras(in_sigmas=lowerCamelCase_ , num_inference_steps=self.num_inference_steps)
__snake_case = np.array([self._sigma_to_t(lowerCamelCase_ , lowerCamelCase_) for sigma in sigmas])
__snake_case = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
__snake_case = torch.from_numpy(lowerCamelCase_).to(device=lowerCamelCase_)
__snake_case = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
__snake_case = torch.from_numpy(lowerCamelCase_)
__snake_case = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
if str(lowerCamelCase_).startswith('mps'):
# mps does not support float64
__snake_case = timesteps.to(lowerCamelCase_ , dtype=torch.floataa)
else:
__snake_case = timesteps.to(device=lowerCamelCase_)
# empty dt and derivative
__snake_case = None
__snake_case = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__snake_case = defaultdict(lowerCamelCase_)
def _a ( self , lowercase_ , lowercase_) -> List[Any]:
__snake_case = np.log(lowerCamelCase_)
# get distribution
__snake_case = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__snake_case = np.cumsum((dists >= 0) , axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
__snake_case = low_idx + 1
__snake_case = log_sigmas[low_idx]
__snake_case = log_sigmas[high_idx]
# interpolate sigmas
__snake_case = (low - log_sigma) / (low - high)
__snake_case = np.clip(lowerCamelCase_ , 0 , 1)
# transform interpolation to time range
__snake_case = (1 - w) * low_idx + w * high_idx
__snake_case = t.reshape(sigma.shape)
return t
def _a ( self , lowercase_ , lowercase_) -> torch.FloatTensor:
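        # Karras et al. (2022) schedule: interpolate the sigma range in rho-space.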
__snake_case = in_sigmas[-1].item()
__snake_case = in_sigmas[0].item()
__snake_case = 7.0 # 7.0 is the value used in the paper
__snake_case = np.linspace(0 , 1 , lowerCamelCase_)
__snake_case = sigma_min ** (1 / rho)
__snake_case = sigma_max ** (1 / rho)
__snake_case = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _a ( self) -> Optional[int]:
return self.dt is None
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ) -> Union[SchedulerOutput, Tuple]:
__snake_case = self.index_for_timestep(lowerCamelCase_)
# advance index counter by 1
__snake_case = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__snake_case = self.sigmas[step_index]
__snake_case = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__snake_case = self.sigmas[step_index - 1]
__snake_case = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__snake_case = 0
__snake_case = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__snake_case = sigma_hat if self.state_in_first_order else sigma_next
__snake_case = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__snake_case = sigma_hat if self.state_in_first_order else sigma_next
__snake_case = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__snake_case = model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")
if self.config.clip_sample:
__snake_case = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range)
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__snake_case = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__snake_case = sigma_next - sigma_hat
# store for 2nd order step
__snake_case = derivative
__snake_case = dt
__snake_case = sample
else:
# 2. 2nd order / Heun's method
__snake_case = (sample - pred_original_sample) / sigma_next
__snake_case = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__snake_case = self.dt
__snake_case = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , ) -> torch.FloatTensor:
__snake_case = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase_):
# mps does not support float64
__snake_case = self.timesteps.to(original_samples.device , dtype=torch.floataa)
__snake_case = timesteps.to(original_samples.device , dtype=torch.floataa)
else:
__snake_case = self.timesteps.to(original_samples.device)
__snake_case = timesteps.to(original_samples.device)
__snake_case = [self.index_for_timestep(lowerCamelCase_ , lowerCamelCase_) for t in timesteps]
__snake_case = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
__snake_case = sigma.unsqueeze(-1)
__snake_case = original_samples + noise * sigma
return noisy_samples
def __len__( self) -> int:
return self.config.num_train_timesteps
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''table-transformer'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=1_0_0 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=2_5_6 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__snake_case = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(lowercase_ , lowercase_):
__snake_case = backbone_config.get('model_type')
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(lowercase_)
# set timm attributes to None
__snake_case , __snake_case , __snake_case = None, None, None
__snake_case = use_timm_backbone
__snake_case = backbone_config
__snake_case = num_channels
__snake_case = num_queries
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = init_xavier_std
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = encoder_layers
__snake_case = auxiliary_loss
__snake_case = position_embedding_type
__snake_case = backbone
__snake_case = use_pretrained_backbone
__snake_case = dilation
# Hungarian matcher
__snake_case = class_cost
__snake_case = bbox_cost
__snake_case = giou_cost
# Loss coefficients
__snake_case = mask_loss_coefficient
__snake_case = dice_loss_coefficient
__snake_case = bbox_loss_coefficient
__snake_case = giou_loss_coefficient
__snake_case = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def _a ( self) -> int:
return self.encoder_attention_heads
@property
def _a ( self) -> int:
return self.d_model
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def _a ( self) -> float:
return 1e-5
@property
def _a ( self) -> int:
return 1_2
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : List[str] = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
from maths.prime_check import is_prime
def A ( snake_case__ : int ) -> int:
'''simple docstring'''
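    # Return the twin of `number` (i.e. number + 2) when both are prime, else -1.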
if not isinstance(snake_case__ , snake_case__ ):
__snake_case = f"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if is_prime(snake_case__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
UpperCAmelCase__ : Tuple = """__DUMMY_TRANSFORMERS_USER__"""
UpperCAmelCase__ : Dict = """Dummy User"""
UpperCAmelCase__ : Union[str, Any] = """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"""
UpperCAmelCase__ : Union[str, Any] = """https://hub-ci.huggingface.co"""
UpperCAmelCase__ : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
UpperCAmelCase__ : Union[str, Any] = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
UpperCAmelCase__ : List[Any] = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def A ( snake_case__ : str ) -> Union[str, Any]:
'''simple docstring'''
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , snake_case__ )
@pytest.fixture
def A ( snake_case__ : Tuple ) -> List[Any]:
'''simple docstring'''
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , snake_case__ )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , snake_case__ )
@pytest.fixture
def A ( snake_case__ : Tuple ) -> int:
'''simple docstring'''
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , snake_case__ )
@pytest.fixture
def A ( snake_case__ : str , snake_case__ : Tuple ) -> str:
'''simple docstring'''
HfFolder.save_token(snake_case__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def A ( ) -> Optional[Any]:
'''simple docstring'''
return HfApi(endpoint=snake_case__ )
@pytest.fixture(scope='session' )
def A ( snake_case__ : Tuple ) -> int:
'''simple docstring'''
__snake_case = HfFolder.get_token()
HfFolder.save_token(snake_case__ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(snake_case__ )
@pytest.fixture
def A ( snake_case__ : int ) -> int:
'''simple docstring'''
def _cleanup_repo(snake_case__ : Optional[Any] ):
hf_api.delete_repo(snake_case__ , token=snake_case__ , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def A ( snake_case__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
@contextmanager
def _temporary_repo(snake_case__ : Any ):
try:
yield repo_id
finally:
cleanup_repo(snake_case__ )
return _temporary_repo
@pytest.fixture(scope='session' )
def A ( snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] ) -> Any:
'''simple docstring'''
__snake_case = f"repo_txt_data-{int(time.time() * 10e3 )}"
__snake_case = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(snake_case__ , token=snake_case__ , repo_type='dataset' , private=snake_case__ )
hf_api.upload_file(
token=snake_case__ , path_or_fileobj=str(snake_case__ ) , path_in_repo='data/text_data.txt' , repo_id=snake_case__ , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(snake_case__ , token=snake_case__ , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A ( snake_case__ : Any , snake_case__ : str , snake_case__ : Tuple ) -> Optional[int]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def A ( snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__snake_case = f"repo_zipped_txt_data-{int(time.time() * 10e3 )}"
__snake_case = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(snake_case__ , token=snake_case__ , repo_type='dataset' , private=snake_case__ )
hf_api.upload_file(
token=snake_case__ , path_or_fileobj=str(snake_case__ ) , path_in_repo='data.zip' , repo_id=snake_case__ , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(snake_case__ , token=snake_case__ , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def A ( snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ) -> int:
'''simple docstring'''
__snake_case = f"repo_zipped_img_data-{int(time.time() * 10e3 )}"
__snake_case = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(snake_case__ , token=snake_case__ , repo_type='dataset' , private=snake_case__ )
hf_api.upload_file(
token=snake_case__ , path_or_fileobj=str(snake_case__ ) , path_in_repo='data.zip' , repo_id=snake_case__ , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(snake_case__ , token=snake_case__ , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A ( snake_case__ : Dict , snake_case__ : int , snake_case__ : Dict ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 720 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url( repo_id , path , revision ):
    '''simple docstring'''
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
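# Worked expansion (illustrative, not part of the original test): for
# repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None,
# the expected URL is
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv
# since urllib.parse.quote escapes the spaces as %20.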
| 676 | 0 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory( *objects ):
    '''simple docstring'''
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size( exception: Exception ):
    '''simple docstring'''
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size( function: callable = None , starting_batch_size: int = 128 ):
    '''simple docstring'''
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
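# Illustrative usage sketch (not part of the original file; `train` and its
# arguments are hypothetical). The decorator injects the current batch size as
# the first positional argument and halves it whenever an OOM error is raised:
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size, model, dataloader):
#     ...  # training step that may raise "CUDA out of memory."
#
# train(model, dataloader)  # called WITHOUT batch_size; the decorator supplies it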
| 721 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key( k ) -> str:
    '''simple docstring'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus( tf_weights: dict , cfg_updates: dict ) -> PegasusForConditionalGeneration:
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy( path="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
    '''simple docstring'''
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch( ckpt_path: str , save_dir: str ):
    '''simple docstring'''
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
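# Illustrative invocation (paths and script name are hypothetical):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc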
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 676 | 0 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline(ChunkPipeline ):
    def __init__( self , **kwargs ):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__( self , image , *args , num_workers=None , batch_size=None , **kwargs ):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess( self , image , points_per_batch=64 , crops_n_layers = 0 , crop_overlap_ratio = 512 / 1500 , points_per_crop = 32 , crop_n_points_downscale_factor = 1 , ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None")

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward( self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
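# Illustrative usage sketch (not part of the original file; the checkpoint id is
# an assumption):
#
# from transformers import pipeline
#
# generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
# outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg")
# masks, scores = outputs["masks"], outputs["scores"]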
| 700 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline ):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")

        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )

    def enable_attention_slicing( self , slice_size = "auto" ):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__( self , audio , sampling_rate=16_000 , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt = None , num_images_per_prompt: int = 1 , eta: float = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict: bool = True , callback = None , callback_steps: int = 1 , **kwargs , ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
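# Illustrative usage sketch (assumptions: this file is registered as the community
# pipeline "speech_to_image_diffusion"; `audio` is a 16 kHz mono waveform; the
# checkpoint ids below are placeholders):
#
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5",
#     custom_pipeline="speech_to_image_diffusion",
#     speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#     speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
# )
# image = pipe(audio, sampling_rate=16_000).images[0]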
| 676 | 0 |
'''simple docstring'''
import math
def sieve( n: int ) -> list[int]:
    '''simple docstring'''
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
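# Complexity note (illustrative, not part of the original file): the segmented
# sieve keeps only an O(sqrt(n))-sized window of flags in memory instead of the
# full O(n) boolean array, e.g.
# assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]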
| 701 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate( self , eval_dataset = None , eval_examples=None , ignore_keys = None , metric_key_prefix = "eval" , **gen_kwargs , ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix = "test" , **gen_kwargs ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
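# Design note (illustrative, not part of the original file): `compute_metrics` is
# temporarily set to None so the inner evaluation loop only gathers raw
# predictions; the metrics are then computed once on the post-processed,
# example-level outputs produced by `post_process_function`.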
| 676 | 0 |
from collections.abc import Callable
import numpy as np
def heun( ode_func: Callable , y0: float , x0: float , step_size: float , x_end: float ) -> np.ndarray:
    '''simple docstring'''
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_pred = y[k] + step_size * ode_func(x, y[k])  # predictor step (Euler)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )  # corrector step (trapezoidal average of the two slopes)
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
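# Illustrative usage (hypothetical ODE, not part of the original file):
# approximate y' = y with y(0) = 1 on [0, 1]; the last entry approximates e.
#
# y = heun(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
# print(y[-1])  # ~2.71828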
| 702 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search( grid: list[list[int]] , init: list[int] , goal: list[int] , cost: int , heuristic: list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
    '''simple docstring'''
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
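# Note (illustrative, not part of the original file): with the Manhattan-distance
# heuristic built above this is a standard A* search; replacing the heuristic with
# all zeros would reduce it to uniform-cost (Dijkstra) search on the unit-cost grid.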
| 676 | 0 |
import math
def insertion_sort( array: list , start: int = 0 , end: int = 0 ) -> list:
    '''simple docstring'''
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify( array: list , index: int , heap_size: int ) -> None:  # Max Heap
    '''simple docstring'''
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort( array: list ) -> list:
    '''simple docstring'''
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_3( array: list , first_index: int , middle_index: int , last_index: int ) -> int:
    '''simple docstring'''
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition( array: list , low: int , high: int , pivot: int ) -> int:
    '''simple docstring'''
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort( array: list ) -> list:
    '''simple docstring'''
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort( array: list , start: int , end: int , size_threshold: int , max_depth: int ) -> list:
    '''simple docstring'''
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
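# Design note (illustrative, not part of the original file): introsort runs
# quicksort, switches to heapsort once the recursion depth exceeds
# 2 * ceil(log2(n)), and finishes partitions smaller than 16 elements with
# insertion sort, e.g.
# assert sort([4, 2, 6, 8, 1, 7, 8, 22]) == [1, 2, 4, 6, 7, 8, 8, 22]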
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 703 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase ):
    def analyze_directory( self , directory: Path , identifier: Union[str, None] = None , n_identifier: Union[str, List[str], None] = None , ignore_files: Union[str, List[str], None] = None , only_modules: bool = True , ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doc_examples( self ):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doc_examples( self ):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doc_examples( self ):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_doc_examples( self ):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_sources( self ):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
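# Note (illustrative, not part of the original file): with `only_modules=True`
# each file's doctests run through `doctest.DocTestSuite` on the imported
# transformers attribute, while `only_modules=False` (used for docs/source)
# doctests the raw files via `doctest.testfile` instead.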
| 676 | 0 |
'''simple docstring'''
def get_min_or_max( min_val: int = 10 , max_val: int = 1000 , option: bool = True ) -> int:
    '''simple docstring'''
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg( number_1: int , number_2: int ) -> int:
    '''simple docstring'''
    return int((number_1 + number_2) / 2)


def guess_the_number( lower: int , higher: int , to_guess: int ) -> None:
    '''simple docstring'''
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher

    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    '''simple docstring'''
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
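# Note (illustrative, not part of the original file): the search is plain
# bisection, so it needs on the order of log2(higher - lower) probes, e.g.
# guess_the_number(0, 1024, 700) converges in at most ~10 guesses.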
| 704 |
def combination_sum_iv( n: int , array: list[int] , target: int ) -> int:
    '''simple docstring'''

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array( n: int , array: list[int] , target: int ) -> int:
    '''simple docstring'''

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up( n: int , array: list[int] , target: int ) -> int:
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
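# Quick sanity check (illustrative, not part of the original file): for
# array=[1, 2, 5] and target=5 all three implementations count the same
# 9 ordered combinations:
# assert combination_sum_iv(3, [1, 2, 5], 5) == combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9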
| 676 | 0 |
def print_pascal_triangle( num_rows: int ) -> None:
    '''simple docstring'''
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle( num_rows: int ) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row( triangle: list[list[int]] , current_row_idx: int ) -> list[int]:
    '''simple docstring'''
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element( triangle: list[list[int]] , current_row: list[int] , current_row_idx: int , current_col_idx: int , ) -> None:
    '''simple docstring'''
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized( num_rows: int ) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    result = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
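# Illustrative example (not part of the original file): both generators agree, e.g.
# generate_pascal_triangle(4) == generate_pascal_triangle_optimized(4)
#                             == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]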
| 705 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase ):
    def _create_dummy_dataset( self ):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index( self ):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True)
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays( self ):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT, )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization( self ):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", metric_type=faiss.METRIC_INNER_PRODUCT, )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index( self ):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs")
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index( self ):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create") as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase ):
    def test_flat_ip( self ):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory( self ):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom( self ):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization( self ):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs( mockfs ):
    '''simple docstring'''
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase ):
    def test_elasticsearch( self ):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create") as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
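# Note (illustrative, not part of the original file): the Elasticsearch tests never
# contact a real cluster -- `search`, index creation and bulk indexing are all
# patched, so only the `ElasticSearchIndex` glue code is exercised.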
| 676 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowercase ( __a ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = ['''input_ids''', '''attention_mask''']
__UpperCAmelCase = []
__UpperCAmelCase = []
def __init__( self , lowercase_ , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , lowercase_=None , **lowercase_ , ) -> str:
__snake_case = AddedToken(a_ , lstrip=a_ , rstrip=a_) if isinstance(a_ , a_) else mask_token
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , cls_token=a_ , pad_token=a_ , mask_token=a_ , tokenizer_file=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(a_))
__snake_case = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__snake_case = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__snake_case = 1
__snake_case = len(self.sp_model)
__snake_case = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(a_)
}
__snake_case = {v: k for k, v in self.lang_code_to_id.items()}
__snake_case = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
__snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__snake_case = list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
__snake_case = src_lang if src_lang is not None else """en_XX"""
__snake_case = self.lang_code_to_id[self._src_lang]
__snake_case = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self) -> Dict:
__snake_case = self.__dict__.copy()
__snake_case = None
__snake_case = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowercase_) -> Any:
__snake_case = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
__snake_case = {}
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def _a ( self) -> int:
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _a ( self) -> List[Any]:
return self._src_lang
@src_lang.setter
def _a ( self , lowercase_) -> List[str]:
__snake_case = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens)
prefix_ones = [1] * len(self.prefix_tokens)
suffix_ones = [1] * len(self.suffix_tokens)
if token_ids_1 is None:
return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
self.src_lang = src_lang
inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs)
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text):
return self.sp_model.encode(text , out_type=str)
def _convert_token_to_id(self, token):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token(self, index):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def convert_tokens_to_string(self, tokens):
# SPIECE_UNDERLINE ("▁") is the module-level sentencepiece word-boundary marker
out_string = "".join(tokens).replace(SPIECE_UNDERLINE , ' ').strip()
return out_string
def save_vocabulary(self, save_directory, filename_prefix=None):
if not os.path.isdir(save_directory):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file , 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
def prepare_seq2seq_batch(self , src_texts , src_lang="en_XX" , tgt_texts=None , tgt_lang="ro_RO" , **kwargs , ):
self.src_lang = src_lang
self.tgt_lang = tgt_lang
return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs)
def _switch_to_input_mode(self):
return self.set_src_lang_special_tokens(self.src_lang)
def _switch_to_target_mode(self):
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang) -> None:
"""Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
self.cur_lang_code = self.lang_code_to_id[src_lang]
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
def set_tgt_lang_special_tokens(self, lang) -> None:
"""Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
self.cur_lang_code = self.lang_code_to_id[lang]
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
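# Minimal usage sketch (illustrative; assumes this is the MBart-style tokenizer whose class
# definition and sentencepiece model path are not visible in this excerpt):
# tok = MBartTokenizer("sentencepiece.bpe.model", src_lang="en_XX", tgt_lang="ro_RO")
# ids = tok("Hello world").input_ids  # ends with [eos_token_id, en_XX language code id]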
| 706 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]) -> None:
'''Compute WER/CER for the predictions and write them (and optionally all outputs) to disk.'''
log_outputs = args.log_outputs
dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])
# load metric
wer = load_metric('wer')
cer = load_metric('cer')
# compute metrics
wer_result = wer.compute(references=result['target'] , predictions=result['prediction'])
cer_result = cer.compute(references=result['target'] , predictions=result['prediction'])
# print & log results
result_str = f"WER: {wer_result}\nCER: {cer_result}"
print(result_str)
with open(f"{dataset_id}_eval_results.txt" , 'w') as f:
f.write(result_str)
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
pred_file = f"log_{dataset_id}_predictions.txt"
target_file = f"log_{dataset_id}_targets.txt"
with open(pred_file , 'w') as p, open(target_file , 'w') as t:
# mapping function to write output
def write_to_file(batch, i):
p.write(f"{i}" + '\n')
p.write(batch['prediction'] + '\n')
t.write(f"{i}" + '\n')
t.write(batch['target'] + '\n')
result.map(write_to_file , with_indices=True)
def normalize_text(text: str) -> str:
'''Lower-case the text and strip characters/whitespace runs that were ignored during training.'''
chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
text = re.sub(chars_to_ignore_regex , '' , text.lower())
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
for t in token_sequences_to_ignore:
text = ' '.join(text.split(t))
return text
def main(args):
# load dataset
dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True)
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
sampling_rate = feature_extractor.sampling_rate
# resample audio
dataset = dataset.cast_column('audio' , Audio(sampling_rate=sampling_rate))
# load eval pipeline
if args.device is None:
args.device = 0 if torch.cuda.is_available() else -1
asr = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device)
# map function to decode audio
def map_to_pred(batch):
prediction = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s)
batch['prediction'] = prediction['text']
batch['target'] = normalize_text(batch['sentence'])
return batch
# run inference on all examples
result = dataset.map(map_to_pred , remove_columns=dataset.column_names)
# compute and log_results
# do not change function below
log_results(result , args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
args = parser.parse_args()
main(args)
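# Example invocation (script name and model/dataset ids are illustrative):
# python eval.py --model_id facebook/wav2vec2-base-960h \
#     --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs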
| 676 | 0 |
from __future__ import annotations
graph: dict[str, list[str]] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
def __init__(self, graph, source_vertex) -> None:
self.graph = graph
# mapping node to its parent in resulting breadth first tree
self.parent = {}
self.source_vertex = source_vertex
def breath_first_search(self) -> None:
visited = {self.source_vertex}
self.parent[self.source_vertex] = None
queue = [self.source_vertex]  # first in first out queue
while queue:
vertex = queue.pop(0)
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(adjacent_vertex)
self.parent[adjacent_vertex] = vertex
queue.append(adjacent_vertex)
def shortest_path(self, target_vertex) -> str:
if target_vertex == self.source_vertex:
return self.source_vertex
target_vertex_parent = self.parent.get(target_vertex)
if target_vertex_parent is None:
msg = (
F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(msg)
return self.shortest_path(target_vertex_parent) + F"->{target_vertex}"
if __name__ == "__main__":
g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
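# Expected behaviour of the three calls above:
#   G->C->A->B->D
#   G  (the source vertex itself)
#   ValueError: No path from vertex: G to vertex: Foo  (raised, not printed)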
| 707 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
'''solves the multi-process interleaved print problem by taking a lock on this file'''
with open(__file__ , 'r') as fh:
fcntl.flock(fh , fcntl.LOCK_EX)
try:
print(*msgs)
finally:
fcntl.flock(fh , fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
rank = dist.get_rank()
world_size = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
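# On a healthy 2-process run the output looks roughly like this (versions illustrative):
#   [node1-0] is OK (global rank: 0/2)
#   [node1-1] is OK (global rank: 1/2)
#   pt=2.0.1, cuda=11.8, nccl=(2, 14, 3)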
| 676 | 0 |
def solution() -> str:
'''Returns the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000 (Project Euler 48).'''
total = 0
for i in range(1 , 1001):
total += i**i
return str(total)[-10:]
if __name__ == "__main__":
print(solution())
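# Expected output: 9110846700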
| 708 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
'''Resolve the direct video source via the downloadgram API and return the raw bytes.'''
base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
return requests.get(video_url).content
if __name__ == "__main__":
url = input("Enter Video/IGTV url: ").strip()
file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
pipeline_class = StableUnCLIPPipeline
params = TEXT_TO_IMAGE_PARAMS
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
test_xformers_attention = False
def get_dummy_components(self):
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0)
prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
torch.manual_seed(0)
prior_text_encoder = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ))
torch.manual_seed(0)
prior = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
torch.manual_seed(0)
prior_scheduler = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0)
image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')
torch.manual_seed(0)
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
torch.manual_seed(0)
text_encoder = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ))
torch.manual_seed(0)
unet = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
torch.manual_seed(0)
scheduler = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=False , steps_offset=1 , )
torch.manual_seed(0)
vae = AutoencoderKL()
components = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith('mps'):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def test_attention_slicing_forward_pass(self):
test_max_difference = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
def test_inference_batch_single_identical(self):
test_max_difference = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_unclip(self):
expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy')
pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.float16)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
generator = torch.Generator(device='cpu').manual_seed(0)
output = pipe('anime turtle' , generator=generator , output_type='np')
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image , expected_image)
def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.float16)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_ = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 709 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
choice_labels = ids_tensor([self.batch_size] , self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , use_stable_embedding=True , )
def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
model = OpenLlamaModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids , attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
config.add_cross_attention = True
model = OpenLlamaModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
result = model(
input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
result = model(input_ids , attention_mask=input_mask)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
model = OpenLlamaForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
config.is_decoder = True
config.add_cross_attention = True
model = OpenLlamaForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(
input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1)
output_from_no_past = model(
next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
output_from_past = model(
next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
# select random slice
random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
def setUp(self):
self.model_tester = OpenLlamaModelTester(self)
self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_open_llama_sequence_classification_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict['input_ids']
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
model = OpenLlamaForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def test_open_llama_sequence_classification_model_for_single_label(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = 'single_label_classification'
input_ids = input_dict['input_ids']
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
model = OpenLlamaForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def test_open_llama_sequence_classification_model_for_multi_label(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = 'multi_label_classification'
input_ids = input_dict['input_ids']
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
model = OpenLlamaForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def test_save_load_fast_init_from_base(self):
pass
@parameterized.expand([('linear',), ('dynamic',)])
def test_model_rope_scaling(self, scaling_type):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
short_input = ids_tensor([1, 10] , config.vocab_size)
long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42)  # Fixed seed at init time so the two models get the same random weights
original_model = OpenLlamaModel(config)
original_model.to(torch_device)
original_model.eval()
original_short_output = original_model(short_input).last_hidden_state
original_long_output = original_model(long_input).last_hidden_state
set_seed(42)  # Fixed seed at init time so the two models get the same random weights
config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
scaled_model = OpenLlamaModel(config)
scaled_model.to(torch_device)
scaled_model.eval()
scaled_short_output = scaled_model(short_input).last_hidden_state
scaled_long_output = scaled_model(long_input).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5))
else:
self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5))
| 676 | 0 |
def is_automorphic_number(number: int) -> bool:
'''An automorphic number's square ends in the number itself, e.g. 25**2 == 625.'''
if not isinstance(number , int):
msg = f"Input value of [number={number}] must be an integer"
raise TypeError(msg)
if number < 0:
return False
number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
def is_automorphic_number(number: int) -> bool:
if not isinstance(number , int):
msg = f"Input value of [number={number}] must be an integer"
raise TypeError(msg)
if number < 0:
return False
number_square = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
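# Examples: is_automorphic_number(25) -> True (625 ends in 25); is_automorphic_number(7) -> False (49 does not end in 7)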
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
model_input_names = ["input_features", "is_longer"]
def __init__( self , feature_size=64 , sampling_rate=48_000 , hop_length=480 , max_length_s=10 , fft_window_size=1024 , padding_value=0.0 , return_attention_mask=False , frequency_min: float = 0 , frequency_max: float = 14_000 , top_db: int = None , truncation: str = "fusion" , padding: str = "repeatpad" , **kwargs , ):
super().__init__(
feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
self.top_db = top_db
self.truncation = truncation
self.padding = padding
self.fft_window_size = fft_window_size
self.nb_frequency_bins = (fft_window_size >> 1) + 1
self.hop_length = hop_length
self.max_length_s = max_length_s
self.nb_max_samples = max_length_s * sampling_rate
self.sampling_rate = sampling_rate
self.frequency_min = frequency_min
self.frequency_max = frequency_max
self.mel_filters = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale='htk' , )
self.mel_filters_slaney = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , )
def to_dict(self) -> Dict[str, Any]:
'''Serializes this instance to a dict, dropping the (re-computable) mel filter banks.'''
output = copy.deepcopy(self.__dict__)
output['feature_extractor_type'] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _np_extract_fbank_features(self, waveform, mel_filters=None) -> np.ndarray:
log_mel_spectrogram = spectrogram(
waveform , window_function(self.fft_window_size , 'hann') , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel='dB' , )
return log_mel_spectrogram.T
def _random_mel_fusion(self, mel, total_frames, chunk_frames):
ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3)
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
ranges[1] = [0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
ranges[2] = [0]
# randomly choose index for each part
idx_front = np.random.choice(ranges[0])
idx_middle = np.random.choice(ranges[1])
idx_back = np.random.choice(ranges[2])
mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
mel_shrink = torch.tensor(mel[None, None, :])
mel_shrink = torch.nn.functional.interpolate(
mel_shrink , size=[chunk_frames, 64] , mode='bilinear' , align_corners=False)
mel_shrink = mel_shrink[0][0].numpy()
mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0)
return mel_fusion
def _get_input_mel(self, waveform, max_length, truncation, padding):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
longer = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
overflow = len(waveform) - max_length
idx = np.random.randint(0 , overflow + 1)
waveform = waveform[idx : idx + max_length]
input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
mel = self._np_extract_fbank_features(waveform , self.mel_filters)
chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
total_frames = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
input_mel = np.stack([mel, mel, mel, mel] , axis=0)
longer = False
else:
input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames)
longer = True
else:
raise NotImplementedError(F"data_truncating {truncation} not implemented")
else:
longer = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
n_repeat = int(max_length / len(waveform))
waveform = np.stack(np.tile(waveform , n_repeat + 1))[:max_length]
if padding == "repeatpad":
n_repeat = int(max_length / len(waveform))
waveform = np.stack(np.tile(waveform , n_repeat))
waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0)
if truncation == "fusion":
input_mel = self._np_extract_fbank_features(waveform , self.mel_filters)
input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0)
else:
input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney)[None, :]
return input_mel, longer
def __call__( self , raw_speech , truncation: str = None , padding: str = None , max_length: int = None , sampling_rate: int = None , return_tensors=None , **kwargs , ) -> BatchFeature:
truncation = truncation if truncation is not None else self.truncation
padding = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
is_batched_numpy = isinstance(raw_speech , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}")
is_batched = is_batched_numpy or (
isinstance(raw_speech , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
raw_speech = [np.asarray(speech , dtype=np.float64) for speech in raw_speech]
elif not is_batched and not isinstance(raw_speech , np.ndarray):
raw_speech = np.asarray(raw_speech , dtype=np.float64)
elif isinstance(raw_speech , np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
raw_speech = raw_speech.astype(np.float64)
# always return batch
if not is_batched:
raw_speech = [np.asarray(raw_speech)]
# convert to mel spectrogram, truncate and pad if needed.
padded_inputs = [
self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding)
for waveform in raw_speech
]
input_mel = []
is_longer = []
for mel, longer in padded_inputs:
input_mel.append(mel)
is_longer.append(longer)
if truncation == "fusion" and sum(is_longer) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
rand_idx = np.random.randint(0 , len(input_mel))
is_longer[rand_idx] = True
if isinstance(input_mel[0] , List):
input_mel = [np.asarray(feature , dtype=np.float64) for feature in input_mel]
# is_longer is a list of bool
is_longer = [[longer] for longer in is_longer]
input_features = {'input_features': input_mel, 'is_longer': is_longer}
input_features = BatchFeature(input_features)
if return_tensors is not None:
input_features = input_features.convert_to_tensors(return_tensors)
return input_features
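# Usage sketch (illustrative): extract fused log-mel features from 48 kHz mono audio
# features = ClapFeatureExtractor()(raw_audio, sampling_rate=48_000, return_tensors="pt")
# In "fusion" mode features["input_features"] has shape roughly (batch, 4, 1001, 64).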
| 711 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
'''Maps each element x to 1 / (1 + e**-x).'''
return 1 / (1 + np.exp(-vector))
def swish(vector: np.ndarray) -> np.ndarray:
'''Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x).'''
return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
def test_diffusers_import(self):
try:
import diffusers  # noqa: F401
except ImportError:
assert False
def test_backend_registration(self):
import diffusers
from diffusers.dependency_versions_table import deps
all_classes = inspect.getmembers(diffusers , inspect.isclass)
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
backend = 'k-diffusion'
elif backend == "invisible_watermark":
backend = 'invisible-watermark'
assert backend in deps, F"{backend} is not in the deps table!"
| 712 |
def lucas_lehmer_test(p: int) -> bool:
'''Lucas-Lehmer primality test: True iff the Mersenne number 2**p - 1 is prime (for prime p).'''
if p < 2:
raise ValueError('p should not be less than 2!')
elif p == 2:
return True
s = 4
m = (1 << p) - 1
for _ in range(p - 2):
s = ((s * s) - 2) % m
return s == 0
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
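# Expected output:
#   True   (2**7 - 1 == 127 is prime)
#   False  (2**11 - 1 == 2047 == 23 * 89)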
| 676 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
model_type = 'speech_to_text'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , vocab_size=10_000 , encoder_layers=12 , encoder_ffn_dim=2048 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=6000 , max_target_positions=1024 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=1024 , input_feat_per_channel=80 , input_channels=1 , **kwargs , ):
self.vocab_size = vocab_size
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
self.max_source_positions = max_source_positions
self.max_target_positions = max_target_positions
self.num_conv_layers = num_conv_layers
self.conv_kernel_sizes = list(conv_kernel_sizes)
self.conv_channels = conv_channels
self.input_feat_per_channel = input_feat_per_channel
self.input_channels = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`.")
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 713 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
_import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
def abbr(a: str, b: str) -> bool:
'''Returns True if `a` can be abbreviated to `b` by upper-casing some lowercase
letters and deleting the remaining lowercase letters.'''
n = len(a)
m = len(b)
dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
dp[0][0] = True
for i in range(n):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
dp[i + 1][j + 1] = True
if a[i].islower():
dp[i + 1][j] = True
return dp[n][m]
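# Examples: abbr("daBcd", "ABC") -> True, abbr("dBcd", "ABC") -> False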
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 0 |
def stooge_sort(arr):
'''Sorts `arr` in place with stooge sort and returns it.'''
stooge(arr , 0 , len(arr) - 1)
return arr
def stooge(arr, i, h):
if i >= h:
return
# If the first element is larger than the last, swap them
if arr[i] > arr[h]:
arr[i], arr[h] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
t = int((h - i + 1) / 3)
# Recursively sort first 2/3 elements
stooge(arr , i , (h - t))
# Recursively sort last 2/3 elements
stooge(arr , i + t , h)
# Recursively sort first 2/3 elements
stooge(arr , i , (h - t))
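# Note: stooge sort runs in O(n**(log 3 / log 1.5)) ~ O(n**2.71) time, so it is of pedagogical interest only.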
if __name__ == "__main__":
UpperCAmelCase__ : int = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ : Any = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 715 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
'''Builds a Swin2SRConfig matching the checkpoint variant named in the URL.'''
config = Swin2SRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
config.upscale = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
config.upscale = 4
config.image_size = 48
config.upsampler = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
config.depths = [6, 6, 6, 6]
config.embed_dim = 60
config.num_heads = [6, 6, 6, 6]
config.upsampler = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
config.upscale = 4
config.upsampler = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
config.num_channels = 1
config.upscale = 1
config.image_size = 126
config.window_size = 7
config.img_range = 255.0
config.upsampler = ''
return config
def rename_key(name, config):
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
__snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__snake_case = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__snake_case = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "conv_first" in name:
__snake_case = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__snake_case = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__snake_case = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__snake_case = name.replace('upsample.2' , 'upsample.convolution_1' )
__snake_case = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__snake_case = 'swin2sr.' + name
return name
def convert_state_dict(orig_state_dict, config):
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key)
if "qkv" in key:
key_split = key.split('.')
stage_num = int(key_split[1])
block_num = int(key_split[4])
dim = config.embed_dim
if "weight" in key:
orig_state_dict[F"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"] = val[:dim, :]
orig_state_dict[F"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
orig_state_dict[F"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"] = val[-dim:, :]
else:
orig_state_dict[F"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"] = val[:dim]
orig_state_dict[F"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
orig_state_dict[F"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"] = val[-dim:]
else:
orig_state_dict[rename_key(key , config)] = val
return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
config = get_config(checkpoint_url)
model = Swin2SRForImageSuperResolution(config)
model.eval()
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu')
new_state_dict = convert_state_dict(state_dict , config)
missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False)
if len(missing_keys) > 0:
raise ValueError('Missing keys when converting: {}'.format(missing_keys))
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict")
# verify values
url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
image = Image.open(requests.get(url , stream=True).raw).convert('RGB')
processor = Swin2SRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
image_size = 126 if 'Jpeg' in checkpoint_url else 256
transforms = Compose(
[
Resize((image_size, image_size)),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225]),
])
pixel_values = transforms(image).unsqueeze(0)
if config.num_channels == 1:
pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)
outputs = model(pixel_values)
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
expected_shape = torch.Size([1, 3, 512, 512])
expected_slice = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]])
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
expected_shape = torch.Size([1, 3, 1024, 1024])
expected_slice = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]])
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
expected_shape = torch.Size([1, 3, 1024, 1024])
expected_slice = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]])
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
expected_shape = torch.Size([1, 3, 512, 512])
expected_slice = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]])
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
expected_shape = torch.Size([1, 3, 1024, 1024])
expected_slice = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]])
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1e-3)
print('Looks ok!' )
url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving image processor to {pytorch_dump_folder_path}")
processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 676 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan
UpperCAmelCase__ : Optional[int] = 6_37_81_37.0
UpperCAmelCase__ : Tuple = 6_35_67_52.31_42_45
UpperCAmelCase__ : Tuple = 6_37_81_37
def A ( snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float ) -> float:
    '''simple docstring'''
    __snake_case = (AXIS_A - AXIS_B) / AXIS_A
    __snake_case = atan((1 - flattening) * tan(radians(lat1 ) ) )
    __snake_case = atan((1 - flattening) * tan(radians(lat2 ) ) )
    __snake_case = radians(lon1 )
    __snake_case = radians(lon2 )
    # Equation
    __snake_case = sin((phi_2 - phi_1) / 2 )
    __snake_case = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    __snake_case = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
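    # Illustrative usage (a sketch, not from the original module; the coordinates
    # below are assumptions): distance in metres between San Francisco and the
    # Yosemite area, expected to be roughly 2.5e5 (about 254 km).
    print(A(37.774856, -122.424227, 37.864742, -119.537521))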
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowercase :
def __init__( self , lowercase_ , lowercase_=2 , lowercase_=3 , lowercase_=4 , lowercase_=2 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=9_9 , lowercase_=3_6 , lowercase_=3 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=6 , lowercase_=6 , lowercase_=3 , lowercase_=4 , lowercase_=None , lowercase_=1_0_0_0 , ) -> Tuple:
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = patch_size
__snake_case = text_seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = coordinate_size
__snake_case = shape_size
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
__snake_case = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__snake_case = text_seq_length
__snake_case = (image_size // patch_size) ** 2 + 1
__snake_case = self.text_seq_length + self.image_seq_length
def _a ( self) -> str:
__snake_case = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
__snake_case = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case = bbox[i, j, 3]
__snake_case = bbox[i, j, 1]
__snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case = bbox[i, j, 2]
__snake_case = bbox[i, j, 0]
__snake_case = t
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.text_seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
__snake_case = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> int:
__snake_case = LayoutLMvaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
# text + image
__snake_case = model(lowercase_ , pixel_values=lowercase_)
__snake_case = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_)
__snake_case = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , token_type_ids=lowercase_)
__snake_case = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
# text only
__snake_case = model(lowercase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
# image only
__snake_case = model(pixel_values=lowercase_)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[Any]:
__snake_case = self.num_labels
__snake_case = LayoutLMvaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[int]:
__snake_case = self.num_labels
__snake_case = LayoutLMvaForTokenClassification(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Any:
__snake_case = LayoutLMvaForQuestionAnswering(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _a ( self) -> Tuple:
__snake_case = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class __lowercase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Tuple:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _a ( self) -> List[str]:
__snake_case = LayoutLMvaModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self , lowercase_ , lowercase_ , lowercase_=False) -> str:
__snake_case = copy.deepcopy(lowercase_)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
__snake_case = {
k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous()
if isinstance(lowercase_ , torch.Tensor) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
__snake_case = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
]:
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
]:
__snake_case = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowercase_ , )
return inputs_dict
def _a ( self) -> List[str]:
self.config_tester.run_common_tests()
def _a ( self) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Dict:
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Any:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_)
def _a ( self) -> Any:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_)
def _a ( self) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_)
@slow
def _a ( self) -> Any:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = LayoutLMvaModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def A ( ) -> int:
'''simple docstring'''
__snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class __lowercase ( unittest.TestCase ):
@cached_property
def _a ( self) -> Tuple:
return LayoutLMvaImageProcessor(apply_ocr=lowercase_) if is_vision_available() else None
@slow
def _a ( self) -> int:
__snake_case = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(lowercase_)
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=lowercase_ , return_tensors='pt').pixel_values.to(lowercase_)
__snake_case = torch.tensor([[1, 2]])
__snake_case = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
# forward pass
__snake_case = model(
input_ids=input_ids.to(lowercase_) , bbox=bbox.to(lowercase_) , pixel_values=pixel_values.to(lowercase_) , )
# verify the logits
__snake_case = torch.Size((1, 1_9_9, 7_6_8))
self.assertEqual(outputs.last_hidden_state.shape , lowercase_)
__snake_case = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1e-4))
| 717 |
from __future__ import annotations
class __lowercase :
def __init__( self , lowercase_) -> None:
__snake_case = data
__snake_case = None
__snake_case = None
def A ( snake_case__ : Node | None ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def A ( snake_case__ : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def A ( snake_case__ : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
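# Illustrative note: "full" here means every node has either zero or two children;
# a lone root or a leaf counts as full, a node with exactly one child does not.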
def A ( ) -> None: # Main function for testing.
'''simple docstring'''
__snake_case = Node(1 )
__snake_case = Node(2 )
__snake_case = Node(3 )
__snake_case = Node(4 )
__snake_case = Node(5 )
__snake_case = Node(6 )
__snake_case = Node(7 )
__snake_case = Node(8 )
__snake_case = Node(9 )
print(is_full_binary_tree(snake_case__ ) )
print(depth_of_tree(snake_case__ ) )
print('Tree is: ' )
display(snake_case__ )
if __name__ == "__main__":
main()
| 676 | 0 |
UpperCAmelCase__ : str = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''table-transformer'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=1_0_0 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=2_5_6 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__snake_case = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(lowercase_ , dict):
__snake_case = backbone_config.get('model_type')
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(lowercase_)
# set timm attributes to None
__snake_case , __snake_case , __snake_case = None, None, None
__snake_case = use_timm_backbone
__snake_case = backbone_config
__snake_case = num_channels
__snake_case = num_queries
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = init_xavier_std
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = encoder_layers
__snake_case = auxiliary_loss
__snake_case = position_embedding_type
__snake_case = backbone
__snake_case = use_pretrained_backbone
__snake_case = dilation
# Hungarian matcher
__snake_case = class_cost
__snake_case = bbox_cost
__snake_case = giou_cost
# Loss coefficients
__snake_case = mask_loss_coefficient
__snake_case = dice_loss_coefficient
__snake_case = bbox_loss_coefficient
__snake_case = giou_loss_coefficient
__snake_case = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def _a ( self) -> int:
return self.encoder_attention_heads
@property
def _a ( self) -> int:
return self.d_model
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def _a ( self) -> float:
return 1e-5
@property
def _a ( self) -> int:
return 1_2
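# Usage note (illustrative): the ONNX export config above declares `pixel_values`
# (batch, num_channels, height, width) and `pixel_mask` (batch) as inputs, validates
# against the PyTorch model with an absolute tolerance of 1e-5, and targets opset 12.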
| 676 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
UpperCAmelCase__ : List[Any] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
UpperCAmelCase__ : List[str] = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
UpperCAmelCase__ : Tuple = BeautifulSoup(res.text, "html.parser")
UpperCAmelCase__ : int = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(F"""https://google.com{link.get('href')}""")
| 719 |
from maths.prime_check import is_prime
def A ( snake_case__ : int ) -> int:
'''simple docstring'''
    if not isinstance(snake_case__ , int ):
__snake_case = f"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if is_prime(snake_case__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
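    # Illustrative checks (the function above returns p + 2 when both p and p + 2
    # are prime, and -1 otherwise):
    print(A(5))  # 7, since (5, 7) is a twin prime pair
    print(A(8))  # -1, since 8 is not prime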
| 676 | 0 |
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class __lowercase ( __lowercase ):
__UpperCAmelCase = '''philschmid/bart-large-cnn-samsum'''
__UpperCAmelCase = (
'''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
'''and returns a summary of the text.'''
)
__UpperCAmelCase = '''summarizer'''
__UpperCAmelCase = AutoTokenizer
    __UpperCAmelCase = AutoModelForSeq2SeqLM
__UpperCAmelCase = ['''text''']
__UpperCAmelCase = ['''text''']
def _a ( self , lowercase_) -> str:
        return self.pre_processor(lowercase_ , return_tensors='pt' , truncation=True)
def _a ( self , lowercase_) -> List[Any]:
        return self.model.generate(**lowercase_)[0]
def _a ( self , lowercase_) -> int:
        return self.pre_processor.decode(lowercase_ , skip_special_tokens=True , clean_up_tokenization_spaces=True)
| 720 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def A ( snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Any ) -> Optional[int]:
'''simple docstring'''
__snake_case = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}"
| 676 | 0 |
from torch import nn
def A ( snake_case__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"Unsupported activation function: {act_fn}" )
| 721 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def A ( snake_case__ : List[Any] ) -> str:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
__snake_case = k.replace(snake_case__ , snake_case__ )
return k
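# Example (illustrative, not from the source): applying PATTERNS in order turns a TF
# key such as "encoder/memory_attention/output_proj/kernel" into
# "encoder.encoder_attn.out_proj.weight".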
def A ( snake_case__ : dict , snake_case__ : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
__snake_case = DEFAULTS.copy()
cfg_kwargs.update(snake_case__ )
__snake_case = PegasusConfig(**snake_case__ )
__snake_case = PegasusForConditionalGeneration(snake_case__ )
__snake_case = torch_model.model.state_dict()
__snake_case = {}
for k, v in tf_weights.items():
__snake_case = rename_state_dict_key(snake_case__ )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
__snake_case = v.T
__snake_case = torch.tensor(snake_case__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
__snake_case = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
__snake_case = mapping['shared.weight']
__snake_case = mapping['shared.weight']
__snake_case = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**snake_case__ )
__snake_case , __snake_case = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ )
__snake_case = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
__snake_case = tf.train.list_variables(snake_case__ )
__snake_case = {}
__snake_case = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
__snake_case = any(pat in name for pat in ignore_name )
if skip_key:
continue
__snake_case = tf.train.load_variable(snake_case__ , snake_case__ )
__snake_case = array
return tf_weights
def A ( snake_case__ : str , snake_case__ : str ) -> Tuple:
'''simple docstring'''
# save tokenizer first
__snake_case = Path(snake_case__ ).parent.name
__snake_case = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
__snake_case = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case__ )
# convert model
__snake_case = get_tf_weights_as_numpy(snake_case__ )
__snake_case = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
__snake_case = task_specific_params
__snake_case = convert_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
__snake_case = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
UpperCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ : int = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase__ : str = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 676 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class __lowercase ( __a ):
__UpperCAmelCase = '''bert'''
def __init__( self , lowercase_=3_0_5_2_2 , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=0 , lowercase_="absolute" , lowercase_=True , lowercase_=None , **lowercase_ , ) -> List[Any]:
        super().__init__(pad_token_id=lowercase_ , **lowercase_)
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class __lowercase ( __a ):
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
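# Usage sketch (assumption: the classes above are the anonymized BertConfig and its
# ONNX export config): instantiating the first with no arguments reproduces the
# bert-base geometry (30522 vocab, 768 hidden, 12 layers and heads), and the ONNX
# `inputs` property declares batch/sequence dynamic axes for input_ids,
# attention_mask and token_type_ids.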
| 700 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
        __snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=True , normalize=True)[
0
]
        if isinstance(lowercase_ , str):
            __snake_case = 1
        elif isinstance(lowercase_ , list):
            __snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
            elif isinstance(negative_prompt , str):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
                uncond_tokens , padding='max_length' , max_length=max_length , truncation=True , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
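# Usage sketch (every name below is an assumption, not from the source): load Whisper,
# CLIP and Stable Diffusion components, instantiate the pipeline above with them, then
# call it with a raw waveform, e.g. pipeline(audio, sampling_rate=16_000).images[0].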
| 676 | 0 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
UpperCAmelCase__ : List[str] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __lowercase ( nn.Module ):
def __init__( self , lowercase_) -> Optional[Any]:
super().__init__()
        __snake_case = torchvision.models.resnet152(pretrained=lowercase_)
__snake_case = list(model.children())[:-2]
__snake_case = nn.Sequential(*lowercase_)
        __snake_case = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])
def _a ( self , lowercase_) -> List[str]:
__snake_case = self.pool(self.model(lowercase_))
__snake_case = torch.flatten(lowercase_ , start_dim=2)
__snake_case = out.transpose(1 , 2).contiguous()
return out # BxNx2048
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Union[str, Any]:
        __snake_case = [json.loads(l) for l in open(lowercase_)]
__snake_case = os.path.dirname(lowercase_)
__snake_case = tokenizer
__snake_case = labels
__snake_case = len(lowercase_)
__snake_case = max_seq_length
__snake_case = transforms
def __len__( self) -> Any:
return len(self.data)
def __getitem__( self , lowercase_) -> Any:
__snake_case = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=lowercase_))
__snake_case , __snake_case , __snake_case = sentence[0], sentence[1:-1], sentence[-1]
__snake_case = sentence[: self.max_seq_length]
__snake_case = torch.zeros(self.n_classes)
__snake_case = 1
__snake_case = Image.open(os.path.join(self.data_dir , self.data[index]['img'])).convert('RGB')
__snake_case = self.transforms(lowercase_)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self) -> List[str]:
__snake_case = Counter()
for row in self.data:
label_freqs.update(row['label'])
return label_freqs
def A ( snake_case__ : Optional[Any] ) -> int:
'''simple docstring'''
__snake_case = [len(row['sentence'] ) for row in batch]
    __snake_case , __snake_case = len(batch ), max(lens )
    __snake_case = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    __snake_case = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
__snake_case = input_row['sentence']
__snake_case = 1
__snake_case = torch.stack([row['image'] for row in batch] )
__snake_case = torch.stack([row['label'] for row in batch] )
__snake_case = torch.stack([row['image_start_token'] for row in batch] )
__snake_case = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
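# Shape note (illustrative): for a batch of B rows, `text_tensor` and `mask_tensor`
# come back as (B, max_len) LongTensors, with positions past each sentence's true
# length left zero (padding, masked out by the attention mask).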
def A ( ) -> List[str]:
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def A ( ) -> Dict:
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
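# Note (illustrative): the Compose above resizes to 256, center-crops to 224 and
# normalizes with the channel statistics shown, matching the input the ResNet
# image encoder defined earlier expects.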
| 701 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( lowerCamelCase__ ):
def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple:
super().__init__(*lowercase_ , **lowercase_)
__snake_case = eval_examples
__snake_case = post_process_function
def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]:
__snake_case = gen_kwargs.copy()
__snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
__snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
__snake_case = gen_kwargs
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(lowercase_)
__snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_)
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
else:
__snake_case = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_)
return metrics
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]:
__snake_case = gen_kwargs.copy()
__snake_case = self.get_test_dataloader(lowercase_)
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict')
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
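# Note (illustrative): both overrides above temporarily disable `compute_metrics` so
# the inner evaluation loop only gathers predictions; metrics are computed afterwards,
# once `post_process_function` has turned raw generations into final predictions.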
| 676 | 0 |
from typing import Any
def A ( snake_case__ : list ) -> list:
    '''simple docstring'''
    if not input_list:
        return []
    __snake_case = [input_list.count(value ) for value in input_list]
    __snake_case = max(result ) # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
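    # Illustrative usage (the function above returns the sorted list of mode values):
    print(A([2, 2, 3, 4, 4, 4]))  # [4]
    print(A([1, 1, 2, 2]))  # [1, 2]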
| 702 |
from __future__ import annotations
UpperCAmelCase__ : Dict = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def A ( snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
__snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the reference grid
__snake_case = 1
__snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the action grid
__snake_case = init[0]
__snake_case = init[1]
__snake_case = 0
__snake_case = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case = [[f, g, x, y]]
__snake_case = False # flag that is set when search is complete
__snake_case = False # flag set if we can't find expand
while not found and not resign:
if len(snake_case__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case = cell.pop()
__snake_case = next_cell[2]
__snake_case = next_cell[3]
__snake_case = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case = True
else:
for i in range(len(snake_case__ ) ): # to try out different valid actions
__snake_case = x + DIRECTIONS[i][0]
__snake_case = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case = g + cost
__snake_case = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case = 1
__snake_case = i
__snake_case = []
__snake_case = goal[0]
__snake_case = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case = x - DIRECTIONS[action[x][y]][0]
__snake_case = y - DIRECTIONS[action[x][y]][1]
__snake_case = xa
__snake_case = ya
invpath.append([x, y] )
__snake_case = []
for i in range(len(snake_case__ ) ):
path.append(invpath[len(snake_case__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCAmelCase__ : str = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCAmelCase__ : int = [0, 0]
# all coordinates are given in format [y,x]
UpperCAmelCase__ : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCAmelCase__ : Optional[Any] = 1
# the cost map which pushes the path closer to the goal
UpperCAmelCase__ : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCAmelCase__ : Tuple = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCAmelCase__ : Optional[int] = 99
UpperCAmelCase__ , UpperCAmelCase__ : str = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ : str = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ['YolosFeatureExtractor']
UpperCAmelCase__ : Dict = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[Any] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
            if isinstance(lowercase_ , list):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
| 676 | 0 |
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 704 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """
    Count the ordered combinations of elements of `array` that sum to `target`,
    using naive recursion (exponential time).
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """
    Same count, but top-down recursion memoized in `dp_array` so each sub-target
    is solved only once.
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, computed bottom-up: dp_array[i] is the number of combinations summing to i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
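    # For array=[1, 2, 5] and target=5 the recurrence gives f(5) = f(4) + f(3) + f(0) = 5 + 3 + 1 = 9,
    # so all three implementations return 9.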
| 676 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {"vocab_file": "sentencepiece.model"}
UpperCAmelCase__ : Dict = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
UpperCAmelCase__ : str = {
"google/rembert": 2_56,
}
class __lowercase ( __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowercase_ , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_="[CLS]" , lowercase_="[SEP]" , lowercase_="[UNK]" , lowercase_="[SEP]" , lowercase_="[PAD]" , lowercase_="[CLS]" , lowercase_="[MASK]" , **lowercase_ , ) -> Tuple:
super().__init__(
do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , **__snake_case , )
__snake_case = do_lower_case
__snake_case = remove_space
__snake_case = keep_accents
__snake_case = vocab_file
__snake_case = spm.SentencePieceProcessor()
self.sp_model.Load(__snake_case)
@property
def _a ( self) -> int:
return len(self.sp_model)
def _a ( self) -> str:
__snake_case = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> str:
__snake_case = self.__dict__.copy()
__snake_case = None
return state
def __setstate__( self , lowercase_) -> Dict:
__snake_case = d
__snake_case = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
def _a ( self , lowercase_ , lowercase_=False) -> Dict:
__snake_case = self.sp_model.EncodeAsPieces(__snake_case)
return pieces
def _a ( self , lowercase_) -> Tuple:
return self.sp_model.PieceToId(__snake_case)
def _a ( self , lowercase_) -> str:
return self.sp_model.IdToPiece(__snake_case)
def _a ( self , lowercase_) -> Optional[Any]:
__snake_case = self.sp_model.decode_pieces(__snake_case)
return out_string
def _a ( self , lowercase_ , lowercase_ = None) -> str:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = False) -> str:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case)) + [1] + ([0] * len(__snake_case)) + [1]
return [1] + ([0] * len(__snake_case)) + [1]
def _a ( self , lowercase_ , lowercase_ = None) -> str:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _a ( self , lowercase_ , lowercase_ = None) -> Union[str, Any]:
if not os.path.isdir(__snake_case):
logger.error('Vocabulary path ({}) should be a directory'.format(__snake_case))
return
__snake_case = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case):
copyfile(self.vocab_file , __snake_case)
return (out_vocab_file,)
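# Usage sketch (hedged: the class above is RemBERT's slow tokenizer with obfuscated names, and the
# vocab path is hypothetical — any trained SentencePiece model file would do):
# tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
# ids = tokenizer.encode("Hello world")  # wraps the pieces as [CLS] ... [SEP]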
| 705 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> List[str]:
__snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase_) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
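        # with METRIC_INNER_PRODUCT, the all-ones query scores highest against the largest stored
        # vector (i * ones with i = 29), hence 'my_name-train_29' is the nearest example below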
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
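        # once 'vecs' is dropped, searching it again must raise (MissingIndex in the real test)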
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0)
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def A ( snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__snake_case = 'index.faiss'
__snake_case = f"mock://{index_name}"
index.save(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=lowercase_)
mocked_bulk.return_value([(True, None)] * 3)
index.add_documents(['foo', 'bar', 'foobar'])
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
| 676 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = "dpr"
def __init__( self , lowercase_=3_0_5_2_2 , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=0 , lowercase_="absolute" , lowercase_ = 0 , **lowercase_ , ) -> Optional[int]:
super().__init__(pad_token_id=lowercase_ , **lowercase_)
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = projection_dim
__snake_case = position_embedding_type
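# Usage sketch (the class above corresponds to transformers' DPRConfig; defaults mirror BERT-base):
# config = DPRConfig(projection_dim=128)  # projection_dim=0 (the default) means no projection layer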
| 706 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = args.log_outputs
__snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
__snake_case = load_metric('wer' )
__snake_case = load_metric('cer' )
# compute metrics
__snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] )
__snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
__snake_case = f"WER: {wer_result}\nCER: {cer_result}"
print(snake_case__ )
with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
f.write(snake_case__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__snake_case = f"log_{dataset_id}_predictions.txt"
__snake_case = f"log_{dataset_id}_targets.txt"
with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t:
# mapping function to write output
def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
p.write(f"{i}" + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f"{i}" + '\n' )
t.write(batch['target'] + '\n' )
result.map(snake_case__ , with_indices=snake_case__ )
def A ( snake_case__ : str ) -> str:
'''simple docstring'''
__snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__snake_case = re.sub(snake_case__ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
__snake_case = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
__snake_case = ' '.join(text.split(snake_case__ ) )
return text
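# e.g. normalize_text("Hello, WORLD!") -> "hello world": ignored punctuation removed, text
# lowercased, and newline/space runs collapsed to single spaces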
def A ( snake_case__ : int ) -> Optional[int]:
'''simple docstring'''
# load dataset
__snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
__snake_case = AutoFeatureExtractor.from_pretrained(args.model_id )
__snake_case = feature_extractor.sampling_rate
# resample audio
__snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) )
# load eval pipeline
if args.device is None:
__snake_case = 0 if torch.cuda.is_available() else -1
__snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case__ : Optional[Any] ):
__snake_case = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__snake_case = prediction['text']
__snake_case = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
__snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case__ , snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
| 676 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase : Dict = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
lowerCAmelCase : Optional[Any] = {
"camembert-base": 5_12,
}
lowerCAmelCase : Union[str, Any] = "▁"
class __lowercase ( _UpperCamelCase ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = CamembertTokenizer
def __init__( self , lowercase_=None , lowercase_=None , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=["<s>NOTUSED", "</s>NOTUSED"] , **lowercase_ , ) -> str:
# Mask token behave like a normal word, i.e. include the space before it
__snake_case = AddedToken(__a , lstrip=__a , rstrip=__a) if isinstance(__a , __a) else mask_token
super().__init__(
__a , tokenizer_file=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , additional_special_tokens=__a , **__a , )
__snake_case = vocab_file
__snake_case = False if not self.vocab_file else True
def _a ( self , lowercase_ , lowercase_ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self , lowercase_ , lowercase_ = None) -> List[int]:
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
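    # note: CamemBERT, like RoBERTa, does not use token type ids, so the mask built above is all zeros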
def _a ( self , lowercase_ , lowercase_ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(__a):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__snake_case = os.path.join(
__a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(__a):
copyfile(self.vocab_file , __a)
return (out_vocab_file,)
| 707 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print under an exclusive file lock so output from concurrent ranks doesn't interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
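# On a healthy setup each rank prints something like "[<hostname>-0] is OK (global rank: 0/8)";
# a hang in one of the barrier() calls usually points to NCCL or network misconfiguration.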
| 676 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ ):
__UpperCAmelCase = "convnextv2"
def __init__( self , lowercase_=3 , lowercase_=4 , lowercase_=4 , lowercase_=None , lowercase_=None , lowercase_="gelu" , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=0.0 , lowercase_=2_2_4 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Optional[Any]:
super().__init__(**_snake_case)
__snake_case = num_channels
__snake_case = patch_size
__snake_case = num_stages
__snake_case = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
__snake_case = [3, 3, 9, 3] if depths is None else depths
__snake_case = hidden_act
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = drop_path_rate
__snake_case = image_size
__snake_case = ['stem'] + [F"stage{idx}" for idx in range(1 , len(self.depths) + 1)]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names)
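# Usage sketch (the class above corresponds to transformers' ConvNextV2Config):
# config = ConvNextV2Config(out_features=["stage2", "stage3"])  # out_indices is aligned automatically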
| 708 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video source for an Instagram/IGTV url and return the raw bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 0 |